diff --git a/clang/include/clang/Basic/IdentifierTable.h b/clang/include/clang/Basic/IdentifierTable.h
--- a/clang/include/clang/Basic/IdentifierTable.h
+++ b/clang/include/clang/Basic/IdentifierTable.h
@@ -73,7 +73,7 @@
 /// of a pointer to one of these classes.
 enum { IdentifierInfoAlignment = 8 };
 
-static constexpr int ObjCOrBuiltinIDBits = 16;
+static constexpr int ObjCOrBuiltinIDBits = 17;
 
 /// One of these records is kept for each identifier that
 /// is lexed. This contains information about whether the token was \#define'd,
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -599,9 +599,13 @@
       IntrinsicTypes = {ResultType, Ops[1]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
     }],
-    ManualCodegenMask= [{
+    ManualCodegenMask = [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+      if (Ops.size() == 4) {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+      } else if (Ops.size() == 5)
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[3]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -644,7 +648,11 @@
     ManualCodegenMask = [{
       {
         // Move mask to right before vl.
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+        if (Ops.size() == 5) {
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+          Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+        } else if (Ops.size() == 6)
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         IntrinsicTypes = {ResultType, Ops[4]->getType()};
         Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
         Value *NewVL = Ops[2];
@@ -681,7 +689,11 @@
     }],
     ManualCodegenMask= [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+      if (Ops.size() == 5) {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+      } else if (Ops.size() == 6)
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -701,7 +713,11 @@
     }],
     ManualCodegenMask = [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+      if (Ops.size() == 5) {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+      } else if (Ops.size() == 6)
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -885,7 +901,10 @@
         Operands.push_back(Ops[2 * NF + 1]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 2]);
-        Operands.push_back(Ops[2 * NF + 3]);
+        if (Ops.size() == 2 * NF + 4)
+          Operands.push_back(Ops[2 * NF + 3]);
+        else
+          Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
         assert(Operands.size() == NF + 4);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -958,7 +977,10 @@
         Operands.push_back(Ops[2 * NF + 1]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        Operands.push_back(Ops[2 * NF + 4]);
+        if (Ops.size() == 2 * NF + 5)
+          Operands.push_back(Ops[2 * NF + 4]);
+        else
+          Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
         Value *NewVL = Ops[2 * NF + 2];
         assert(Operands.size() == NF + 4);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
@@ -1032,7 +1054,10 @@
         Operands.push_back(Ops[2 * NF + 2]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        Operands.push_back(Ops[2 * NF + 4]);
+        if (Ops.size() == 2 * NF + 5)
+          Operands.push_back(Ops[2 * NF + 4]);
+        else
+          Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
         assert(Operands.size() == NF + 5);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1099,7 +1124,10 @@
         Operands.push_back(Ops[2 * NF + 2]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        Operands.push_back(Ops[2 * NF + 4]);
+        if (Ops.size() == 2 * NF + 5)
+          Operands.push_back(Ops[2 * NF + 4]);
+        else
+          Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
         assert(Operands.size() == NF + 5);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1318,7 +1346,11 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+        if (Ops.size() == 4) {
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+          Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+        } else if (Ops.size() == 5)
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
@@ -1348,7 +1380,11 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+        if (Ops.size() == 4) {
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+          Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+        } else if (Ops.size() == 5)
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
@@ -1395,7 +1431,11 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+        if (Ops.size() == 4) {
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+          Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+        } else if (Ops.size() == 5)
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1427,7 +1467,11 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+        if (Ops.size() == 4) {
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+          Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+        } else if (Ops.size() == 5)
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1462,7 +1506,11 @@
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+        if (Ops.size() == 4) {
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+          Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_AGNOSTIC));
+        } else if (Ops.size() == 5)
+          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18568,6 +18568,7 @@
   Intrinsic::ID ID = Intrinsic::not_intrinsic;
   unsigned NF = 1;
+  constexpr unsigned TAIL_AGNOSTIC = 1;
 
   // Required for overloaded intrinsics.
   llvm::SmallVector IntrinsicTypes;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -531,8 +531,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
-                                   size_t vl) {
+vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
@@ -550,8 +549,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
-                                   size_t vl) {
+vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
@@ -641,8 +639,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
-                                   size_t vl) {
+vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
@@ -804,901 +801,1582 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
-                                 vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+// +vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t 
test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t 
test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return 
vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t 
mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vaadd_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vaadd_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vaadd_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vaadd_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vaadd_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vaadd_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vaadd_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vaadd_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vaadd_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vaadd_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vaadd_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vaadd_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vaadd_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vaadd_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vaadd_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vaadd_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t 
maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vaadd_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vaadd_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vaadd_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vaadd_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vaadd_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vaadd_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vaadd_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vaadd_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vaadd_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vaadd_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vaadd_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vaadd_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, 
vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vaadd_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vaadd_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vaadd_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vaadd_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vaadd_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vaadd_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vaadd_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vaadd_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vaadd_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vaadd_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vaadd_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vaadd_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vaadd_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vaadd_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vaadd_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vaadd_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint8mf8_t test_vaaddu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - 
vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vaaddu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint8mf4_t test_vaaddu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vaaddu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint8mf2_t test_vaaddu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vaaddu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vaaddu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_mt( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vaaddu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vaaddu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vaaddu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vaaddu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vaaddu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vaaddu_vv_u8m8_mt(vbool1_t mask, 
vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vaaddu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vaaddu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint16mf4_t test_vaaddu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vaaddu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vuint16mf2_t test_vaaddu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vaaddu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vaaddu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vaaddu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vaaddu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vaaddu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t 
test_vaaddu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vaaddu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vaaddu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vaaddu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vuint32mf2_t test_vaaddu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint32m1_t test_vaaddu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_mt( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vaaddu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint32m2_t test_vaaddu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vaaddu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint32m4_t test_vaaddu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vaaddu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, 
vuint32m8_t op2, - size_t vl) { +vuint32m8_t test_vaaddu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vaaddu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vuint64m1_t test_vaaddu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vaaddu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vuint64m2_t test_vaaddu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vaaddu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( +// CHECK-RV64-LABEL: 
@test_vaaddu_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vuint64m4_t test_vaaddu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vaaddu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vuint64m8_t test_vaaddu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c @@ -803,7 +803,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( @@ -812,7 +812,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( @@ -821,7 +821,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, 
vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( @@ -839,7 +839,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( @@ -848,7 +848,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( @@ -857,7 +857,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( @@ -866,7 +866,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( @@ -875,7 +875,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( @@ -884,7 +884,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( @@ -893,7 +893,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( @@ -902,7 +902,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( @@ -911,7 +911,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, 
size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( @@ -929,7 +929,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( @@ -938,7 +938,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( @@ -947,7 +947,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( @@ -956,7 +956,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( @@ -965,7 +965,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( @@ -974,7 +974,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( @@ -983,7 +983,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( @@ -992,7 +992,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( @@ -1001,7 +1001,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( @@ -1019,7 +1019,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( @@ -1028,7 +1028,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( @@ -1037,7 +1037,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( @@ -1046,7 +1046,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( @@ -1055,7 +1055,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( @@ -1064,7 +1064,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( @@ -1073,7 +1073,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( @@ -1082,7 +1082,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( @@ -1091,7 +1091,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( @@ -1109,7 +1109,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( @@ -1118,7 +1118,7 @@ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( @@ -1127,7 +1127,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( @@ -1136,7 +1136,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( @@ -1145,7 +1145,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( @@ -1154,7 +1154,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( @@ -1163,7 +1163,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( @@ -1172,7 +1172,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( @@ -1181,7 +1181,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( @@ -1199,7 +1199,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( @@ -1208,7 +1208,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vv_u8mf4_m( @@ -1217,7 +1217,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( @@ -1226,7 +1226,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( @@ -1235,7 +1235,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( @@ -1244,7 +1244,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( @@ -1253,7 +1253,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( @@ -1262,7 +1262,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( @@ -1271,7 +1271,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( @@ -1289,7 +1289,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( @@ -1298,7 +1298,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( @@ -1307,7 +1307,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( @@ -1316,7 +1316,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( @@ -1325,7 +1325,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( @@ -1334,7 +1334,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( @@ -1343,7 +1343,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( @@ -1352,7 +1352,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( @@ -1361,7 +1361,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( @@ -1379,7 +1379,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( @@ -1388,7 +1388,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( @@ -1397,7 +1397,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( @@ -1406,7 +1406,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t 
op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( @@ -1415,7 +1415,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( @@ -1424,7 +1424,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( @@ -1433,7 +1433,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( @@ -1442,7 +1442,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( @@ -1451,7 +1451,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( @@ -1460,7 +1460,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( @@ -1469,7 +1469,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( @@ -1478,7 +1478,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( @@ -1487,7 +1487,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( @@ -1496,7 +1496,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( @@ -1505,7 +1505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( @@ -1514,7 +1514,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( @@ -1523,7 +1523,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( @@ -1532,7 +1532,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( @@ -1541,7 +1541,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( @@ -1550,7 +1550,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( @@ -1559,7 +1559,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( @@ -1568,7 +1568,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( @@ -1577,7 +1577,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m( @@ -1586,6 +1586,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { 
+ return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t 
op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vadd_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vadd_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -40,7 +40,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -50,7 +50,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoaddei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -70,7 +70,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -80,7 +80,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoaddei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -130,7 +130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -140,7 +140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoaddei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -160,7 +160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ 
-190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -230,7 +230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -240,7 +240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -250,7 +250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -260,7 +260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoaddei16(base, 
bindex, value, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -320,7 +320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -330,7 +330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t 
value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -350,7 +350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -400,7 +400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoaddei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -410,7 +410,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -420,7 +420,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t 
test_vamoaddei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -440,7 +440,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoaddei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -490,7 +490,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -500,7 +500,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t 
test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoaddei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -510,7 +510,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -530,7 +530,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -580,7 +580,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei8(base, bindex, value, vl); } @@ -590,7 +590,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -600,7 +600,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -620,7 +620,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei16(base, bindex, value, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei32(base, bindex, value, vl); } @@ -670,7 +670,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -680,7 +680,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -690,7 +690,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -700,7 +700,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei64(base, bindex, value, vl); } @@ -710,7 +710,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t 
bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoaddei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -760,7 +760,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -770,7 +770,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -780,7 +780,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -790,7 +790,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -800,7 +800,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, 
vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoaddei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -850,7 +850,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoaddei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoaddei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -870,7 +870,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoaddei64_v_i32m1_m 
(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoaddei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -880,7 +880,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoaddei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -890,7 +890,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoaddei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -940,7 +940,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei16_v_i64m1_m 
(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -950,7 +950,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -960,7 +960,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -970,7 +970,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -980,7 +980,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t 
test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoaddei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1030,7 +1030,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoaddei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1040,7 +1040,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoaddei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1050,7 +1050,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoaddei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1070,7 +1070,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoaddei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1130,7 +1130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1140,7 +1140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoaddei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1160,7 +1160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoaddei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoaddei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1220,7 +1220,7 @@ // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoaddei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1230,7 +1230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoaddei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoaddei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1250,7 +1250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei8(mask, base, bindex, value, vl); } @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei8(mask, 
base, bindex, value, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1310,7 +1310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1320,7 +1320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei16(mask, base, bindex, value, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1340,7 +1340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei32_v_u64m4_m(vbool16_t mask, uint64_t *base, 
vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei32(mask, base, bindex, value, vl); } @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoaddei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoaddei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoaddei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } @@ -1400,7 +1400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoaddei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoaddei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -40,7 +40,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -50,7 +50,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoandei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -70,7 +70,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -80,7 +80,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoandei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -130,7 +130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -140,7 +140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoandei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -160,7 +160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -230,7 +230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -240,7 +240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -250,7 +250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -260,7 +260,7 @@ // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -320,7 +320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -330,7 +330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoandei64(base, 
bindex, value, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -350,7 +350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -400,7 +400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoandei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -410,7 +410,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei16_v_u32mf2(uint32_t *base, vuint16mf4_t 
bindex, vuint32mf2_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -420,7 +420,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -440,7 +440,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoandei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -490,7 +490,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, 
vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -500,7 +500,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoandei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -510,7 +510,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -530,7 +530,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // -vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -580,7 +580,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoandei8(base, bindex, value, vl); } @@ -590,7 +590,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -600,7 +600,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -620,7 +620,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoandei16(base, bindex, value, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoandei32(base, bindex, value, vl); } @@ -670,7 +670,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -680,7 +680,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -690,7 +690,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -700,7 +700,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoandei64(base, bindex, value, vl); } @@ -710,7 +710,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t 
test_vamoandei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoandei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -760,7 +760,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -770,7 +770,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -780,7 +780,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -790,7 +790,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { 
+vint32m4_t test_vamoandei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -800,7 +800,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoandei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -850,7 +850,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoandei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, 
vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoandei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -870,7 +870,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoandei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -880,7 +880,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoandei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -890,7 +890,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoandei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t 
bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -940,7 +940,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -950,7 +950,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -960,7 +960,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -970,7 +970,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -980,7 +980,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t 
*base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoandei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1030,7 +1030,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoandei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1040,7 +1040,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoandei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1050,7 +1050,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoandei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1070,7 +1070,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t 
test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoandei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1130,7 +1130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1140,7 +1140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoandei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1160,7 +1160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoandei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoandei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1220,7 +1220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoandei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1230,7 +1230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoandei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoandei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1250,7 +1250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); 
} @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoandei8(mask, base, bindex, value, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1310,7 +1310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1320,7 +1320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoandei16(mask, base, bindex, value, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1340,7 +1340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t 
value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoandei32(mask, base, bindex, value, vl); } @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoandei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoandei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoandei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } @@ -1400,7 +1400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoandei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoandei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -40,7 +40,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -50,7 +50,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamomaxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -70,7 +70,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -80,7 +80,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamomaxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -130,7 +130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -140,7 +140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamomaxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -160,7 +160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ 
-170,7 +170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -230,7 +230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamomaxei8(base, bindex, value, vl); } @@ -240,7 +240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamomaxei16(base, 
bindex, value, vl); } @@ -250,7 +250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -260,7 +260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamomaxei16(base, bindex, value, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamomaxei32(base, bindex, value, vl); } @@ -320,7 +320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t 
value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -330,7 +330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -350,7 +350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamomaxei64(base, bindex, value, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -400,7 +400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t 
test_vamomaxuei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -410,7 +410,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -420,7 +420,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -440,7 +440,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamomaxuei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP1]] // -vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -490,7 +490,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -500,7 +500,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamomaxuei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -510,7 +510,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -530,7 +530,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -580,7 +580,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei8(base, bindex, value, vl); } @@ -590,7 +590,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -600,7 +600,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -620,7 +620,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei16(base, bindex, value, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t 
vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei32(base, bindex, value, vl); } @@ -670,7 +670,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -680,7 +680,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -690,7 +690,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -700,7 +700,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei64(base, bindex, value, vl); } @@ -710,7 +710,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, 
vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamomaxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -760,7 +760,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -770,7 +770,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -780,7 +780,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t 
*base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -790,7 +790,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -800,7 +800,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamomaxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -850,7 +850,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamomaxei32_v_i32m8_m 
(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamomaxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamomaxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -870,7 +870,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamomaxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -880,7 +880,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamomaxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -890,7 +890,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamomaxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t 
test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamomaxei8(mask, base, bindex, value, vl); } @@ -940,7 +940,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -950,7 +950,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -960,7 +960,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -970,7 +970,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamomaxei16(mask, base, bindex, value, vl); } @@ -980,7 +980,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
-vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamomaxei32(mask, base, bindex, value, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamomaxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -1030,7 +1030,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamomaxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -1040,7 +1040,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamomaxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -1050,7 +1050,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamomaxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamomaxei64(mask, base, bindex, value, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1070,7 +1070,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamomaxuei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1130,7 +1130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1140,7 +1140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamomaxuei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1160,7 +1160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return 
vamomaxuei32(mask, base, bindex, value, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamomaxuei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamomaxuei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1220,7 +1220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamomaxuei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1230,7 +1230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamomaxuei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamomaxuei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1250,7 +1250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t 
test_vamomaxuei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei8(mask, base, bindex, value, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1310,7 +1310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1320,7 +1320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei16(mask, base, bindex, value, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t 
mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1340,7 +1340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei32(mask, base, bindex, value, vl); } @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamomaxuei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamomaxuei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamomaxuei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } @@ -1400,7 +1400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamomaxuei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamomaxuei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -40,7 +40,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -50,7 +50,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamominei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -70,7 +70,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t 
bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -80,7 +80,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamominei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -130,7 +130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -140,7 +140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t 
test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamominei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -160,7 +160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -230,7 +230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamominei8(base, bindex, value, vl); } @@ -240,7 +240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -250,7 +250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -260,7 +260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamominei16(base, bindex, value, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamominei32(base, bindex, value, vl); } @@ -320,7 +320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -330,7 +330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -350,7 +350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamominei64(base, bindex, value, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -400,7 +400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamominuei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -410,7 +410,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -420,7 +420,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -440,7 +440,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamominuei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei32_v_u32mf2(uint32_t *base, vuint32mf2_t 
bindex, vuint32mf2_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -490,7 +490,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -500,7 +500,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamominuei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -510,7 +510,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -530,7 +530,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t 
*base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -580,7 +580,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamominuei8(base, bindex, value, vl); } @@ -590,7 +590,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -600,7 +600,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -620,7 +620,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamominuei16(base, bindex, value, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamominuei32(base, bindex, value, vl); } @@ -670,7 +670,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -680,7 +680,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -690,7 +690,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -700,7 
+700,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamominuei64(base, bindex, value, vl); } @@ -710,7 +710,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamominei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -760,7 +760,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -770,7 +770,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -780,7 +780,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -790,7 +790,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -800,7 +800,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamominei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -840,7 +840,7 @@ // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -850,7 +850,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamominei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamominei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -870,7 +870,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamominei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -880,7 +880,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamominei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -890,7 +890,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamominei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -910,7 
+910,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamominei8(mask, base, bindex, value, vl); } @@ -940,7 +940,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -950,7 +950,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -960,7 +960,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ -970,7 +970,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamominei16(mask, base, bindex, value, vl); } @@ 
-980,7 +980,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamominei32(mask, base, bindex, value, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamominei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -1030,7 +1030,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamominei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -1040,7 +1040,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamominei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamominei64(mask, base, 
bindex, value, vl); } @@ -1050,7 +1050,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamominei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamominei64(mask, base, bindex, value, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1070,7 +1070,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamominuei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, 
vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1130,7 +1130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1140,7 +1140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamominuei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1160,7 +1160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t 
bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamominuei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamominuei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1220,7 +1220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamominuei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1230,7 +1230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamominuei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamominuei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1250,7 +1250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP1]] // -vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamominuei8(mask, base, bindex, value, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1310,7 +1310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1320,7 +1320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamominuei16(mask, base, bindex, value, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1340,7 +1340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamominuei32(mask, base, bindex, value, vl); } @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamominuei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamominuei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1390,7 
+1390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamominuei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } @@ -1400,7 +1400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamominuei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamominuei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -40,7 +40,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -50,7 +50,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoorei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -70,7 +70,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -80,7 +80,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoorei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -130,7 +130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -140,7 +140,7 @@ // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoorei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -160,7 +160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -220,7 +220,7 @@ // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -230,7 +230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -240,7 +240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -250,7 +250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -260,7 +260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -300,7 +300,7 @@ 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -320,7 +320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -330,7 +330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -350,7 +350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ 
-380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -400,7 +400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoorei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -410,7 +410,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -420,7 +420,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -440,7 +440,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoorei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) 
{ return vamoorei16(base, bindex, value, vl); } @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -490,7 +490,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -500,7 +500,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoorei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -510,7 +510,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -530,7 +530,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t 
test_vamoorei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -580,7 +580,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoorei8(base, bindex, value, vl); } @@ -590,7 +590,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -600,7 +600,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, 
vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -620,7 +620,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoorei16(base, bindex, value, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoorei32(base, bindex, value, vl); } @@ -670,7 +670,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -680,7 +680,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -690,7 +690,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t 
test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -700,7 +700,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoorei64(base, bindex, value, vl); } @@ -710,7 +710,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoorei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -760,7 +760,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { 
+vint32mf2_t test_vamoorei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -770,7 +770,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -780,7 +780,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -790,7 +790,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -800,7 +800,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoorei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { 
+vint32m2_t test_vamoorei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -850,7 +850,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoorei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoorei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -870,7 +870,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoorei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -880,7 +880,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoorei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -890,7 +890,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoorei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t 
test_vamoorei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -940,7 +940,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -950,7 +950,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -960,7 +960,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -970,7 +970,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t 
test_vamoorei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -980,7 +980,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoorei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoorei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1030,7 +1030,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoorei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1040,7 +1040,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t 
test_vamoorei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1050,7 +1050,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoorei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1070,7 +1070,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoorei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t 
vl) { +vuint32mf2_t test_vamoorei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1130,7 +1130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1140,7 +1140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoorei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1160,7 +1160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, 
uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoorei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoorei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1220,7 +1220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoorei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1230,7 +1230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoorei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoorei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1250,7 +1250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
-vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoorei8(mask, base, bindex, value, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1310,7 +1310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1320,7 +1320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoorei16(mask, base, bindex, value, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1340,7 +1340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoorei32(mask, base, bindex, value, vl); } @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoorei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoorei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoorei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } @@ -1400,7 +1400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoorei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoorei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c @@ -11,8 +11,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, - vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoswapei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -22,8 +21,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, - vint32m1_t value, size_t vl) { +vint32m1_t test_vamoswapei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -33,8 +31,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, - vint32m2_t value, size_t vl) { +vint32m2_t test_vamoswapei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -44,8 +41,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei8_v_i32m4(int32_t *base, vuint8m1_t bindex, - vint32m4_t value, size_t vl) { +vint32m4_t test_vamoswapei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -55,8 +51,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoswapei8_v_i32m8(int32_t *base, vuint8m2_t bindex, - vint32m8_t value, size_t vl) { +vint32m8_t test_vamoswapei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -66,8 +61,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, - vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoswapei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -77,8 +71,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, - vint32m1_t value, size_t vl) { +vint32m1_t test_vamoswapei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -88,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei16_v_i32m2(int32_t *base, vuint16m1_t bindex, - vint32m2_t value, size_t vl) { +vint32m2_t test_vamoswapei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -99,8 +91,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei16_v_i32m4(int32_t *base, vuint16m2_t bindex, - vint32m4_t value, size_t vl) { +vint32m4_t test_vamoswapei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -110,8 +101,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoswapei16_v_i32m8(int32_t *base, vuint16m4_t bindex, - vint32m8_t value, size_t vl) { +vint32m8_t test_vamoswapei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -121,8 +111,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, - vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoswapei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -132,8 +121,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei32_v_i32m1(int32_t *base, vuint32m1_t bindex, - vint32m1_t value, size_t vl) { +vint32m1_t test_vamoswapei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -143,8 +131,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei32_v_i32m2(int32_t *base, vuint32m2_t bindex, - vint32m2_t value, size_t vl) { +vint32m2_t test_vamoswapei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -154,8 +141,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei32_v_i32m4(int32_t *base, vuint32m4_t bindex, - vint32m4_t value, size_t vl) { +vint32m4_t test_vamoswapei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -165,8 +151,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoswapei32_v_i32m8(int32_t *base, vuint32m8_t bindex, - vint32m8_t value, size_t vl) { +vint32m8_t test_vamoswapei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -176,8 +161,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, - vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoswapei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -187,8 +171,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei64_v_i32m1(int32_t *base, vuint64m2_t bindex, - vint32m1_t value, size_t vl) { +vint32m1_t test_vamoswapei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -198,8 +181,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei64_v_i32m2(int32_t *base, vuint64m4_t bindex, - vint32m2_t value, size_t vl) { +vint32m2_t test_vamoswapei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -209,8 +191,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei64_v_i32m4(int32_t *base, vuint64m8_t bindex, - vint32m4_t value, size_t vl) { +vint32m4_t test_vamoswapei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -220,8 +201,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, - vint64m1_t value, size_t vl) { +vint64m1_t test_vamoswapei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -231,8 +211,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, - vint64m2_t value, size_t vl) { +vint64m2_t test_vamoswapei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return 
vamoswapei8(base, bindex, value, vl); } @@ -242,8 +221,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, - vint64m4_t value, size_t vl) { +vint64m4_t test_vamoswapei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -253,8 +231,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei8_v_i64m8(int64_t *base, vuint8m1_t bindex, - vint64m8_t value, size_t vl) { +vint64m8_t test_vamoswapei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -264,8 +241,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, - vint64m1_t value, size_t vl) { +vint64m1_t test_vamoswapei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -275,8 +251,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, - vint64m2_t value, size_t vl) { +vint64m2_t test_vamoswapei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -286,8 +261,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei16_v_i64m4(int64_t *base, vuint16m1_t bindex, - vint64m4_t value, size_t vl) { +vint64m4_t test_vamoswapei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -297,8 +271,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei16_v_i64m8(int64_t *base, vuint16m2_t bindex, - vint64m8_t value, size_t vl) { +vint64m8_t test_vamoswapei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -308,8 +281,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, - vint64m1_t value, size_t vl) { +vint64m1_t test_vamoswapei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -319,8 +291,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei32_v_i64m2(int64_t *base, vuint32m1_t bindex, - vint64m2_t value, size_t vl) { +vint64m2_t 
test_vamoswapei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -330,8 +301,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei32_v_i64m4(int64_t *base, vuint32m2_t bindex, - vint64m4_t value, size_t vl) { +vint64m4_t test_vamoswapei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -341,8 +311,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei32_v_i64m8(int64_t *base, vuint32m4_t bindex, - vint64m8_t value, size_t vl) { +vint64m8_t test_vamoswapei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -352,8 +321,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei64_v_i64m1(int64_t *base, vuint64m1_t bindex, - vint64m1_t value, size_t vl) { +vint64m1_t test_vamoswapei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -363,8 +331,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei64_v_i64m2(int64_t *base, vuint64m2_t bindex, - vint64m2_t value, size_t vl) { +vint64m2_t test_vamoswapei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -374,8 +341,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei64_v_i64m4(int64_t *base, vuint64m4_t bindex, - vint64m4_t value, size_t vl) { +vint64m4_t test_vamoswapei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -385,8 +351,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei64_v_i64m8(int64_t *base, vuint64m8_t bindex, - vint64m8_t value, size_t vl) { +vint64m8_t test_vamoswapei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -396,8 +361,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -407,8 +371,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t 
test_vamoswapei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, - vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoswapei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -418,8 +381,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, - vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoswapei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -429,8 +391,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, - vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoswapei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -440,8 +401,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoswapei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, - vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoswapei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -451,8 +411,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -462,8 +421,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, - vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoswapei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -473,8 +431,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, - vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoswapei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -484,8 +441,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, - vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoswapei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -495,8 +451,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoswapei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, - vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoswapei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -506,8 +461,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -517,8 +471,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, - vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoswapei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -528,8 +481,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, - vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoswapei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -539,8 +491,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, - vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoswapei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -550,8 +501,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoswapei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, - vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoswapei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -561,8 +511,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -572,8 +521,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, - vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoswapei64_v_u32m1(uint32_t *base, vuint64m2_t 
bindex, vuint32m1_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -583,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, - vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoswapei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -594,8 +541,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, - vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoswapei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -605,8 +551,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, - vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoswapei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -616,8 +561,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, - vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoswapei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -627,8 +571,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, - vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoswapei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -638,8 +581,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, - vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoswapei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -649,8 +591,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, - vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoswapei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -660,8 +601,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei16_v_u64m2(uint64_t *base, 
vuint16mf2_t bindex, - vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoswapei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -671,8 +611,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, - vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoswapei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -682,8 +621,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, - vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoswapei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -693,8 +631,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, - vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoswapei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -704,8 +641,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, - vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoswapei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -715,8 +651,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, - vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoswapei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -726,8 +661,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, - vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoswapei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -737,8 +671,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, - vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoswapei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -748,8 +681,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, - vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoswapei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -759,8 +691,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, - vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoswapei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -770,8 +701,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, - vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoswapei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -781,8 +711,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei8_v_f32mf2(float *base, vuint8mf8_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -792,8 +721,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei8_v_f32m1(float *base, vuint8mf4_t bindex, - vfloat32m1_t value, size_t vl) { +vfloat32m1_t test_vamoswapei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -803,8 +731,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei8_v_f32m2(float *base, vuint8mf2_t bindex, - vfloat32m2_t value, size_t vl) { +vfloat32m2_t test_vamoswapei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -814,8 +741,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei8_v_f32m4(float *base, vuint8m1_t bindex, - vfloat32m4_t value, size_t vl) { +vfloat32m4_t test_vamoswapei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -825,8 +751,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vamoswapei8_v_f32m8(float *base, vuint8m2_t bindex, - vfloat32m8_t value, size_t vl) { +vfloat32m8_t test_vamoswapei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -836,8 
+761,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei16_v_f32mf2(float *base, vuint16mf4_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -847,8 +771,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei16_v_f32m1(float *base, vuint16mf2_t bindex, - vfloat32m1_t value, size_t vl) { +vfloat32m1_t test_vamoswapei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -858,8 +781,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei16_v_f32m2(float *base, vuint16m1_t bindex, - vfloat32m2_t value, size_t vl) { +vfloat32m2_t test_vamoswapei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -869,8 +791,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei16_v_f32m4(float *base, vuint16m2_t bindex, - vfloat32m4_t value, size_t vl) { +vfloat32m4_t test_vamoswapei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -880,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vamoswapei16_v_f32m8(float *base, vuint16m4_t bindex, - vfloat32m8_t value, size_t vl) { +vfloat32m8_t test_vamoswapei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -891,8 +811,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei32_v_f32mf2(float *base, vuint32mf2_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -902,8 +821,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei32_v_f32m1(float *base, vuint32m1_t bindex, - vfloat32m1_t value, size_t vl) { +vfloat32m1_t test_vamoswapei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -913,8 +831,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei32_v_f32m2(float *base, vuint32m2_t bindex, - vfloat32m2_t value, size_t vl) { +vfloat32m2_t 
test_vamoswapei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -924,8 +841,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei32_v_f32m4(float *base, vuint32m4_t bindex, - vfloat32m4_t value, size_t vl) { +vfloat32m4_t test_vamoswapei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -935,8 +851,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vamoswapei32_v_f32m8(float *base, vuint32m8_t bindex, - vfloat32m8_t value, size_t vl) { +vfloat32m8_t test_vamoswapei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -946,8 +861,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei64_v_f32mf2(float *base, vuint64m1_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -957,8 +871,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei64_v_f32m1(float *base, vuint64m2_t bindex, - vfloat32m1_t value, size_t vl) { +vfloat32m1_t test_vamoswapei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -968,8 +881,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei64_v_f32m2(float *base, vuint64m4_t bindex, - vfloat32m2_t value, size_t vl) { +vfloat32m2_t test_vamoswapei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -979,8 +891,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei64_v_f32m4(float *base, vuint64m8_t bindex, - vfloat32m4_t value, size_t vl) { +vfloat32m4_t test_vamoswapei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -990,8 +901,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei8_v_f64m1(double *base, vuint8mf8_t bindex, - vfloat64m1_t value, size_t vl) { +vfloat64m1_t test_vamoswapei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -1001,8 +911,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
-vfloat64m2_t test_vamoswapei8_v_f64m2(double *base, vuint8mf4_t bindex, - vfloat64m2_t value, size_t vl) { +vfloat64m2_t test_vamoswapei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -1012,8 +921,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei8_v_f64m4(double *base, vuint8mf2_t bindex, - vfloat64m4_t value, size_t vl) { +vfloat64m4_t test_vamoswapei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -1023,8 +931,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei8_v_f64m8(double *base, vuint8m1_t bindex, - vfloat64m8_t value, size_t vl) { +vfloat64m8_t test_vamoswapei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei8(base, bindex, value, vl); } @@ -1034,8 +941,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei16_v_f64m1(double *base, vuint16mf4_t bindex, - vfloat64m1_t value, size_t vl) { +vfloat64m1_t test_vamoswapei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -1045,8 +951,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei16_v_f64m2(double *base, vuint16mf2_t bindex, - vfloat64m2_t value, size_t vl) { +vfloat64m2_t test_vamoswapei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -1056,8 +961,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei16_v_f64m4(double *base, vuint16m1_t bindex, - vfloat64m4_t value, size_t vl) { +vfloat64m4_t test_vamoswapei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -1067,8 +971,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei16_v_f64m8(double *base, vuint16m2_t bindex, - vfloat64m8_t value, size_t vl) { +vfloat64m8_t test_vamoswapei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei16(base, bindex, value, vl); } @@ -1078,8 +981,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei32_v_f64m1(double *base, vuint32mf2_t bindex, - vfloat64m1_t value, size_t vl) { +vfloat64m1_t test_vamoswapei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -1089,8 +991,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei32_v_f64m2(double *base, vuint32m1_t bindex, - vfloat64m2_t value, size_t vl) { +vfloat64m2_t test_vamoswapei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -1100,8 +1001,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei32_v_f64m4(double *base, vuint32m2_t bindex, - vfloat64m4_t value, size_t vl) { +vfloat64m4_t test_vamoswapei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -1111,8 +1011,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei32_v_f64m8(double *base, vuint32m4_t bindex, - vfloat64m8_t value, size_t vl) { +vfloat64m8_t test_vamoswapei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei32(base, bindex, value, vl); } @@ -1122,8 +1021,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei64_v_f64m1(double *base, vuint64m1_t bindex, - vfloat64m1_t value, size_t vl) { +vfloat64m1_t test_vamoswapei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -1133,8 +1031,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei64_v_f64m2(double *base, vuint64m2_t bindex, - vfloat64m2_t value, size_t vl) { +vfloat64m2_t test_vamoswapei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -1144,8 +1041,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei64_v_f64m4(double *base, vuint64m4_t bindex, - vfloat64m4_t value, size_t vl) { +vfloat64m4_t test_vamoswapei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -1155,8 +1051,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex, - vfloat64m8_t value, size_t vl) { +vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei64(base, bindex, value, vl); } @@ -1166,9 +1061,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint8mf8_t bindex, vint32mf2_t value, - size_t vl) { +vint32mf2_t 
test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1178,9 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base, - vuint8mf4_t bindex, vint32m1_t value, - size_t vl) { +vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1190,9 +1081,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base, - vuint8mf2_t bindex, vint32m2_t value, - size_t vl) { +vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1202,9 +1091,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base, - vuint8m1_t bindex, vint32m4_t value, - size_t vl) { +vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1214,9 +1101,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base, - vuint8m2_t bindex, vint32m8_t value, - size_t vl) { +vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1226,9 +1111,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint16mf4_t bindex, vint32mf2_t value, - size_t vl) { +vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1238,9 +1121,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base, - vuint16mf2_t bindex, vint32m1_t value, - size_t vl) { +vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1250,9 +1131,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base, 
- vuint16m1_t bindex, vint32m2_t value, - size_t vl) { +vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1262,9 +1141,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base, - vuint16m2_t bindex, vint32m4_t value, - size_t vl) { +vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1274,9 +1151,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base, - vuint16m4_t bindex, vint32m8_t value, - size_t vl) { +vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1286,9 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint32mf2_t bindex, vint32mf2_t value, - size_t vl) { +vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1298,9 +1171,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base, - vuint32m1_t bindex, vint32m1_t value, - size_t vl) { +vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1310,9 +1181,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base, - vuint32m2_t bindex, vint32m2_t value, - size_t vl) { +vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1322,9 +1191,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base, - vuint32m4_t bindex, vint32m4_t value, - size_t vl) { +vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1334,9 +1201,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP1]] // -vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base, - vuint32m8_t bindex, vint32m8_t value, - size_t vl) { +vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1346,9 +1211,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint64m1_t bindex, vint32mf2_t value, - size_t vl) { +vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1358,9 +1221,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base, - vuint64m2_t bindex, vint32m1_t value, - size_t vl) { +vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1370,9 +1231,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base, - vuint64m4_t bindex, vint32m2_t value, - size_t vl) { +vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1382,9 +1241,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base, - vuint64m8_t bindex, vint32m4_t value, - size_t vl) { +vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1394,9 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base, - vuint8mf8_t bindex, vint64m1_t value, - size_t vl) { +vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1406,9 +1261,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base, - vuint8mf4_t bindex, vint64m2_t value, - size_t vl) { +vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1418,9 +1271,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base, - vuint8mf2_t bindex, vint64m4_t value, - size_t vl) { +vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1430,9 +1281,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base, - vuint8m1_t bindex, vint64m8_t value, - size_t vl) { +vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1442,9 +1291,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base, - vuint16mf4_t bindex, vint64m1_t value, - size_t vl) { +vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1454,9 +1301,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base, - vuint16mf2_t bindex, vint64m2_t value, - size_t vl) { +vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1466,9 +1311,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base, - vuint16m1_t bindex, vint64m4_t value, - size_t vl) { +vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1478,9 +1321,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base, - vuint16m2_t bindex, vint64m8_t value, - size_t vl) { +vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1490,9 +1331,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base, - vuint32mf2_t bindex, vint64m1_t value, - size_t vl) { +vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1502,9 +1341,7 @@ // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base, - vuint32m1_t bindex, vint64m2_t value, - size_t vl) { +vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1514,9 +1351,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base, - vuint32m2_t bindex, vint64m4_t value, - size_t vl) { +vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1526,9 +1361,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base, - vuint32m4_t bindex, vint64m8_t value, - size_t vl) { +vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1538,9 +1371,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base, - vuint64m1_t bindex, vint64m1_t value, - size_t vl) { +vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1550,9 +1381,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base, - vuint64m2_t bindex, vint64m2_t value, - size_t vl) { +vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1562,9 +1391,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base, - vuint64m4_t bindex, vint64m4_t value, - size_t vl) { +vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1574,9 +1401,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base, - vuint64m8_t bindex, vint64m8_t value, - size_t vl) { +vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return 
vamoswapei64(mask, base, bindex, value, vl); } @@ -1586,9 +1411,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, - vuint8mf8_t bindex, vuint32mf2_t value, - size_t vl) { +vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1598,9 +1421,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base, - vuint8mf4_t bindex, vuint32m1_t value, - size_t vl) { +vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1610,9 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base, - vuint8mf2_t bindex, vuint32m2_t value, - size_t vl) { +vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1622,9 +1441,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base, - vuint8m1_t bindex, vuint32m4_t value, - size_t vl) { +vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1634,9 +1451,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base, - vuint8m2_t bindex, vuint32m8_t value, - size_t vl) { +vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1646,9 +1461,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, - vuint16mf4_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1658,9 +1471,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base, - vuint16mf2_t bindex, vuint32m1_t value, - size_t vl) { +vuint32m1_t 
test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1670,9 +1481,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base, - vuint16m1_t bindex, vuint32m2_t value, - size_t vl) { +vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1682,9 +1491,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base, - vuint16m2_t bindex, vuint32m4_t value, - size_t vl) { +vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1694,9 +1501,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base, - vuint16m4_t bindex, vuint32m8_t value, - size_t vl) { +vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1706,9 +1511,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, - vuint32mf2_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1718,9 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base, - vuint32m1_t bindex, vuint32m1_t value, - size_t vl) { +vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1730,9 +1531,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base, - vuint32m2_t bindex, vuint32m2_t value, - size_t vl) { +vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1742,9 +1541,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t 
test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base, - vuint32m4_t bindex, vuint32m4_t value, - size_t vl) { +vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1754,9 +1551,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base, - vuint32m8_t bindex, vuint32m8_t value, - size_t vl) { +vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1766,9 +1561,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, - vuint64m1_t bindex, - vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1778,9 +1571,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base, - vuint64m2_t bindex, vuint32m1_t value, - size_t vl) { +vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1790,9 +1581,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base, - vuint64m4_t bindex, vuint32m2_t value, - size_t vl) { +vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1802,9 +1591,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base, - vuint64m8_t bindex, vuint32m4_t value, - size_t vl) { +vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1814,9 +1601,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base, - vuint8mf8_t bindex, vuint64m1_t value, - size_t vl) { +vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1826,9 +1611,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base, - vuint8mf4_t bindex, vuint64m2_t value, - size_t vl) { +vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1838,9 +1621,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base, - vuint8mf2_t bindex, vuint64m4_t value, - size_t vl) { +vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1850,9 +1631,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base, - vuint8m1_t bindex, vuint64m8_t value, - size_t vl) { +vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -1862,9 +1641,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base, - vuint16mf4_t bindex, vuint64m1_t value, - size_t vl) { +vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1874,9 +1651,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base, - vuint16mf2_t bindex, vuint64m2_t value, - size_t vl) { +vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1886,9 +1661,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base, - vuint16m1_t bindex, vuint64m4_t value, - size_t vl) { +vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -1898,9 +1671,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base, - vuint16m2_t bindex, vuint64m8_t value, - size_t vl) { +vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ 
-1910,9 +1681,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base, - vuint32mf2_t bindex, vuint64m1_t value, - size_t vl) { +vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1922,9 +1691,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base, - vuint32m1_t bindex, vuint64m2_t value, - size_t vl) { +vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1934,9 +1701,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base, - vuint32m2_t bindex, vuint64m4_t value, - size_t vl) { +vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1946,9 +1711,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base, - vuint32m4_t bindex, vuint64m8_t value, - size_t vl) { +vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -1958,9 +1721,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base, - vuint64m1_t bindex, vuint64m1_t value, - size_t vl) { +vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1970,9 +1731,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base, - vuint64m2_t bindex, vuint64m2_t value, - size_t vl) { +vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1982,9 +1741,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base, - vuint64m4_t bindex, vuint64m4_t value, - size_t vl) { +vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, 
uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -1994,9 +1751,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base, - vuint64m8_t bindex, vuint64m8_t value, - size_t vl) { +vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -2006,9 +1761,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base, - vuint8mf8_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2018,9 +1771,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base, - vuint8mf4_t bindex, vfloat32m1_t value, - size_t vl) { +vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2030,9 +1781,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base, - vuint8mf2_t bindex, vfloat32m2_t value, - size_t vl) { +vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2042,9 +1791,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base, - vuint8m1_t bindex, vfloat32m4_t value, - size_t vl) { +vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2054,9 +1801,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base, - vuint8m2_t bindex, vfloat32m8_t value, - size_t vl) { +vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2066,9 +1811,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base, - vuint16mf4_t bindex, - 
vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2078,9 +1821,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base, - vuint16mf2_t bindex, - vfloat32m1_t value, size_t vl) { +vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2090,9 +1831,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base, - vuint16m1_t bindex, vfloat32m2_t value, - size_t vl) { +vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2102,9 +1841,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base, - vuint16m2_t bindex, vfloat32m4_t value, - size_t vl) { +vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2114,9 +1851,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base, - vuint16m4_t bindex, vfloat32m8_t value, - size_t vl) { +vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2126,9 +1861,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base, - vuint32mf2_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2138,9 +1871,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base, - vuint32m1_t bindex, vfloat32m1_t value, - size_t vl) { +vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2150,9 +1881,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base, - vuint32m2_t bindex, vfloat32m2_t value, - size_t vl) { +vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2162,9 +1891,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base, - vuint32m4_t bindex, vfloat32m4_t value, - size_t vl) { +vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2174,9 +1901,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base, - vuint32m8_t bindex, vfloat32m8_t value, - size_t vl) { +vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2186,9 +1911,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base, - vuint64m1_t bindex, - vfloat32mf2_t value, size_t vl) { +vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -2198,9 +1921,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base, - vuint64m2_t bindex, vfloat32m1_t value, - size_t vl) { +vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -2210,9 +1931,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base, - vuint64m4_t bindex, vfloat32m2_t value, - size_t vl) { +vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -2222,9 +1941,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base, - vuint64m8_t bindex, vfloat32m4_t value, - size_t vl) { +vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -2234,9 +1951,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base, - vuint8mf8_t bindex, vfloat64m1_t value, - size_t vl) { +vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2246,9 +1961,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base, - vuint8mf4_t bindex, vfloat64m2_t value, - size_t vl) { +vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2258,9 +1971,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base, - vuint8mf2_t bindex, vfloat64m4_t value, - size_t vl) { +vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2270,9 +1981,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base, - vuint8m1_t bindex, vfloat64m8_t value, - size_t vl) { +vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei8(mask, base, bindex, value, vl); } @@ -2282,9 +1991,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base, - vuint16mf4_t bindex, - vfloat64m1_t value, size_t vl) { +vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2294,9 +2001,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base, - vuint16mf2_t bindex, - vfloat64m2_t value, size_t vl) { +vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2306,9 +2011,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base, - vuint16m1_t bindex, vfloat64m4_t value, - size_t vl) { +vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return 
vamoswapei16(mask, base, bindex, value, vl); } @@ -2318,9 +2021,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base, - vuint16m2_t bindex, vfloat64m8_t value, - size_t vl) { +vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei16(mask, base, bindex, value, vl); } @@ -2330,9 +2031,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base, - vuint32mf2_t bindex, - vfloat64m1_t value, size_t vl) { +vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2342,9 +2041,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base, - vuint32m1_t bindex, vfloat64m2_t value, - size_t vl) { +vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2354,9 +2051,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base, - vuint32m2_t bindex, vfloat64m4_t value, - size_t vl) { +vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2366,9 +2061,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base, - vuint32m4_t bindex, vfloat64m8_t value, - size_t vl) { +vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vamoswapei32(mask, base, bindex, value, vl); } @@ -2378,9 +2071,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base, - vuint64m1_t bindex, vfloat64m1_t value, - size_t vl) { +vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vamoswapei64(mask, base, bindex, value, vl); } @@ -2390,9 +2081,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base, - vuint64m2_t bindex, vfloat64m2_t value, - size_t vl) { +vfloat64m2_t 
test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
  return vamoswapei64(mask, base, bindex, value, vl);
}

@@ -2402,9 +2091,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base,
- vuint64m4_t bindex, vfloat64m4_t value,
- size_t vl) {
+vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
  return vamoswapei64(mask, base, bindex, value, vl);
}

@@ -2414,8 +2101,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m8_t test_vamoswapei64_v_f64m8_m(vbool8_t mask, double *base,
- vuint64m8_t bindex, vfloat64m8_t value,
- size_t vl) {
+vfloat64m8_t test_vamoswapei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
  return vamoswapei64(mask, base, bindex, value, vl);
}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
@@ -10,7 +10,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+vint32mf2_t test_vamoxorei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
  return vamoxorei8(base, bindex, value, vl);
}

@@ -20,7 +20,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+vint32m1_t test_vamoxorei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
  return vamoxorei8(base, bindex, value, vl);
}

@@ -30,7 +30,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+vint32m2_t test_vamoxorei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
  return vamoxorei8(base, bindex, value, vl);
}

@@ -40,7 +40,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+vint32m4_t test_vamoxorei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
  return vamoxorei8(base, bindex, value, vl);
}

@@ -50,7 +50,7 @@
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t
vl) { +vint32m8_t test_vamoxorei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -60,7 +60,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -70,7 +70,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoxorei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -80,7 +80,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoxorei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoxorei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -130,7 +130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei32_v_i32m2 (int32_t 
*base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -140,7 +140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoxorei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -160,7 +160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -170,7 +170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoxorei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
-vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -230,7 +230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -240,7 +240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -250,7 +250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -260,7 +260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -320,7 +320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -330,7 +330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -350,7 +350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -400,7 +400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoxorei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -410,7 +410,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -420,7 +420,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -440,7 +440,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -450,7 +450,7 @@ // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoxorei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -490,7 +490,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -500,7 +500,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoxorei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -510,7 +510,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei64_v_u32m1(uint32_t *base, vuint64m2_t 
bindex, vuint32m1_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -530,7 +530,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -580,7 +580,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei8(base, bindex, value, vl); } @@ -590,7 +590,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -600,7 +600,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t 
vl) { +vuint64m2_t test_vamoxorei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -620,7 +620,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei16(base, bindex, value, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei32(base, bindex, value, vl); } @@ -670,7 +670,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -680,7 +680,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t 
test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -690,7 +690,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -700,7 +700,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei64(base, bindex, value, vl); } @@ -710,7 +710,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoxorei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t 
test_vamoxorei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -760,7 +760,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -770,7 +770,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoxorei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -780,7 +780,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -790,7 +790,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -800,7 +800,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoxorei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t 
value, size_t vl) { +vint32m1_t test_vamoxorei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -830,7 +830,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -850,7 +850,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +vint32m8_t test_vamoxorei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +vint32mf2_t test_vamoxorei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -870,7 +870,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +vint32m1_t test_vamoxorei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -880,7 +880,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +vint32m2_t test_vamoxorei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -890,7 +890,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t 
bindex, vint32m4_t value, size_t vl) { +vint32m4_t test_vamoxorei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -920,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -940,7 +940,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -950,7 +950,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -960,7 +960,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t 
bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -970,7 +970,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -980,7 +980,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1010,7 +1010,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +vint64m1_t test_vamoxorei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1030,7 +1030,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, 
int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +vint64m2_t test_vamoxorei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1040,7 +1040,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +vint64m4_t test_vamoxorei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1050,7 +1050,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +vint64m8_t test_vamoxorei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1070,7 +1070,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1100,7 +1100,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t 
test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoxorei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1130,7 +1130,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1140,7 +1140,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoxorei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1160,7 +1160,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1190,7 +1190,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +vuint32m8_t test_vamoxorei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +vuint32mf2_t test_vamoxorei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1220,7 +1220,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +vuint32m1_t test_vamoxorei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1230,7 +1230,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +vuint32m2_t test_vamoxorei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +vuint32m4_t test_vamoxorei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1250,7 +1250,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1280,7 +1280,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei8(mask, base, bindex, value, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ 
-1310,7 +1310,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1320,7 +1320,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei16(mask, base, bindex, value, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1340,7 +1340,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei32(mask, base, bindex, value, vl); } @@ -1370,7 +1370,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +vuint64m1_t test_vamoxorei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, 
size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +vuint64m2_t test_vamoxorei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +vuint64m4_t test_vamoxorei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } @@ -1400,7 +1400,7 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +vuint64m8_t test_vamoxorei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vamoxorei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( @@ 
-1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { 
- return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vand(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t 
op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]],
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vand(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return 
vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return 
vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t 
ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t 
test_vand_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vand_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -531,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vasubu(op1, op2, vl); } @@ -550,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vasubu(op1, op2, vl); } @@ -641,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vasubu(op1, op2, vl); } @@ -804,901 +801,1582 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, 
vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vasub_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vasub_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vasub_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vasub_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vasub_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vasub_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vasub_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vasub_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vasub_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vasub_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vasub_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vasub_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vasub_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vasub_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vasub_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vasub_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t 
test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vasub_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vasub_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vasub_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vasub_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vasub_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vasub_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_mt( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vasub_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vasub_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vasub_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vasub_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vasub_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t 
test_vasub_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vasub_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vasub_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vasub_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vasub_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vasub_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vasub_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vasub_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vasub_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vasub_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vasub_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vasub_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vasub_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vasub_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vasub_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( +// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vasub_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint8mf8_t test_vasubu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t 
test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vasubu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint8mf4_t test_vasubu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vasubu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint8mf2_t test_vasubu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vasubu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vasubu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vasubu_vx_u8m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vasubu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vasubu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vasubu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vasubu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vasubu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t 
op2, size_t vl) { +vuint8m8_t test_vasubu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vasubu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vasubu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint16mf4_t test_vasubu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vasubu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vuint16mf2_t test_vasubu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( +// CHECK-RV64-LABEL: 
@test_vasubu_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vasubu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vasubu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vasubu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vasubu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vasubu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t 
maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vasubu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vasubu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vasubu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vasubu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vuint32mf2_t test_vasubu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint32m1_t test_vasubu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vasubu_vx_u32m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vasubu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint32m2_t test_vasubu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vasubu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint32m4_t test_vasubu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vasubu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t 
test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vuint32m8_t test_vasubu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vasubu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vuint64m1_t test_vasubu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vasubu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vuint64m2_t test_vasubu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vasubu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vuint64m4_t test_vasubu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vasubu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vuint64m8_t test_vasubu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vasubu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t
test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) { +vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) { +vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) { +vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -37,7 +37,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { +vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -46,7 +46,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { +vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -55,7 +55,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { +vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -64,7 +64,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { +vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { +vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -82,7 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { +vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) { 
return vcompress(mask, dest, src, vl); } @@ -91,7 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { +vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { +vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -109,7 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { +vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -118,7 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { +vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -127,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { +vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -136,7 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { +vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -145,7 +145,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { +vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -154,7 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { +vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( 
[[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { +vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -172,7 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { +vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -181,7 +181,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { +vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { +vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -199,7 +199,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { +vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -208,7 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { +vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -217,7 +217,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { +vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -226,7 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { +vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -235,7 +235,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vcompress_vm_u8m1 
(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { +vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -244,7 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { +vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -253,7 +253,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { +vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -262,7 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { +vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -271,7 +271,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { +vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { +vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -289,7 +289,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { +vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -298,7 +298,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { +vuint16m2_t test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -307,7 +307,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { +vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, 
vuint16m4_t dest, vuint16m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -316,7 +316,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { +vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -325,7 +325,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { +vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -334,7 +334,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { +vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { +vuint32m2_t test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -352,7 +352,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { +vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -361,7 +361,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { +vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { +vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -379,7 +379,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { +vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ 
-388,7 +388,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { +vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -397,7 +397,61 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { +vuint64m8_t test_vcompress_vm_u64m8(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { + return vcompress(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) { + return vcompress(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) { + return vcompress(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) { + return vcompress(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) { + return vcompress(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) { + return vcompress(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -406,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { 
+vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -415,7 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { +vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -424,7 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { +vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -433,7 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { +vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -442,7 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { +vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -451,7 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { +vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -460,7 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { +vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -469,7 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { +vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { return vcompress(mask, dest, src, vl); } @@ -478,8 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { +vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t 
mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { return vcompress(mask, dest, src, vl); } - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { 
- return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, 
vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vdiv_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + 
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32m1_t test_vdivu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( 
<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) {
+  return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -1,16 +1,70 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
+vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfabs(op1, vl);
 }

@@ -19,7 +73,7 @@
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { +vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) { return vfabs(op1, vl); } @@ -28,7 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { +vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) { return vfabs(op1, vl); } @@ -37,7 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { +vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t op1, size_t vl) { return vfabs(op1, vl); } @@ -46,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { +vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) { return vfabs(op1, vl); } @@ -55,7 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { +vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) { return vfabs(op1, vl); } @@ -64,7 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { +vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) { return vfabs(op1, vl); } @@ -73,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { +vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) { return vfabs(op1, vl); } @@ -82,17 +136,71 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfabs(op1, vl); } +// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( @@ -100,8 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( @@ -109,8 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( @@ -118,8 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( @@ -127,8 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( @@ -136,8 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( @@ -145,8 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( @@ -154,8 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( @@ -163,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfabs_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, 
size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfabs_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfabs_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfabs_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfabs_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfabs_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfabs_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfabs_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfabs_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfabs_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfabs_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfabs_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfabs_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, size_t ta) { + return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfabs_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, size_t ta) { return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c @@ -11,7 +11,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -38,7 +38,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -47,7 +47,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -56,7 +56,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -65,7 +65,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -74,7 +74,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -83,7 +83,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -92,7 +92,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -101,7 +101,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -119,7 +119,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -128,7 +128,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -146,7 +146,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -155,7 +155,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -164,7 +164,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); 
} @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -272,16 +272,286 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8 
(vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, 
size_t vl, uint8_t ta) { +vfloat16mf4_t test_vfadd_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -290,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { +vfloat16mf4_t test_vfadd_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -299,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { +vfloat16mf2_t test_vfadd_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -308,7 +578,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { +vfloat16mf2_t test_vfadd_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -317,7 +587,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { +vfloat16m1_t test_vfadd_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -326,7 +596,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { +vfloat16m1_t test_vfadd_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -335,7 +605,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { +vfloat16m2_t test_vfadd_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { return 
vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -344,7 +614,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { +vfloat16m2_t test_vfadd_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -353,7 +623,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { +vfloat16m4_t test_vfadd_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -362,7 +632,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { +vfloat16m4_t test_vfadd_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -371,7 +641,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) { +vfloat16m8_t test_vfadd_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -380,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { +vfloat16m8_t test_vfadd_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -389,7 +659,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { +vfloat32mf2_t test_vfadd_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -398,7 +668,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { +vfloat32mf2_t test_vfadd_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -407,7 +677,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { +vfloat32m1_t test_vfadd_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -416,7 +686,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { +vfloat32m1_t test_vfadd_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -425,7 +695,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { +vfloat32m2_t test_vfadd_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -434,7 +704,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { +vfloat32m2_t test_vfadd_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -443,7 +713,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { +vfloat32m4_t test_vfadd_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -452,7 +722,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t 
vl, uint8_t ta) { +vfloat32m4_t test_vfadd_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -461,7 +731,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { +vfloat32m8_t test_vfadd_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -470,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { +vfloat32m8_t test_vfadd_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -479,7 +749,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { +vfloat64m1_t test_vfadd_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -488,7 +758,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { +vfloat64m1_t test_vfadd_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -497,7 +767,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { +vfloat64m2_t test_vfadd_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -506,7 +776,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { +vfloat64m2_t test_vfadd_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } @@ -515,7 +785,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { +vfloat64m4_t test_vfadd_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -524,7 +794,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { +vfloat64m4_t test_vfadd_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -533,7 +803,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { +vfloat64m8_t test_vfadd_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } @@ -542,7 +812,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { +vfloat64m8_t test_vfadd_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c @@ -1,10 +1,64 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) { + return vfclass(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) @@ -86,13 +140,66 @@ return vfclass(op1, vl); } +// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t 
op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vfloat32mf2_t op1, size_t vl) { +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -101,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vfloat32m1_t op1, size_t vl) { +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -111,8 +217,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vfloat32m2_t op1, size_t vl) { +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -121,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat32m4_t op1, size_t vl) { +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -131,8 +235,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vfloat32m8_t op1, size_t vl) { +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -141,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vfloat64m1_t op1, size_t vl) { +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -151,8 +253,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vfloat64m2_t op1, 
size_t vl) { +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -161,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vfloat64m4_t op1, size_t vl) { +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } @@ -171,7 +271,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vfloat64m8_t op1, size_t vl) { +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c @@ -1,10 +1,334 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { + return vfcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { + return vfcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { + return vfcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { + return vfcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { + return vfcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { + return vfcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { + return vfcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { + return vfcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { + return vfcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { + return vfcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { + return vfcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { + return vfcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_f_x_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) { + return vfcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) { + return vfcvt_f(src, vl); +} + // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -491,195 +815,499 @@ return vfcvt_f(src, vl); } -// CHECK-RV64-LABEL: 
@test_vfcvt_x_f_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vfloat32m4_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vfloat32m8_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, - vuint32mf2_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat32m4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vfloat32m8_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( @@ -687,348 +1315,1123 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vfloat32m8_t src, size_t vl) { +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { + return vfcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_x_f_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_x_f_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_x_f_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_x_f_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_x_f_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_x_f_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mt(vbool2_t mask, vint16m8_t 
maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_xu_f_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_xu_f_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_xu_f_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_xu_f_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t 
test_vfcvt_f_x_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_x_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_x_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_x_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl, size_t ta) { + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_x_f_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_x_f_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, 
size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_x_f_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_x_f_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_xu_f_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_xu_f_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_xu_f_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_xu_f_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t 
test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vint32mf2_t src, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vint32m1_t src, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vint32m2_t src, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vint32m4_t src, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vint32m8_t src, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vuint32mf2_t src, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vuint32m1_t src, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vuint32m2_t src, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vuint32m4_t src, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vuint32m8_t src, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vfloat64m1_t src, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vfloat64m1_t src, size_t vl) { +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } 
-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vfloat64m2_t src, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vfloat64m2_t src, size_t vl) { +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vfloat64m4_t src, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vfloat64m4_t src, size_t vl) { +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vfloat64m8_t src, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vfloat64m8_t src, size_t vl) { +vint64m8_t 
test_vfcvt_rtz_x_f_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vfloat64m1_t src, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vfloat64m1_t src, size_t vl) { +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vfloat64m2_t src, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vfloat64m2_t src, size_t vl) { +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vfloat64m4_t src, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vfloat64m4_t src, size_t vl) { +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vfloat64m8_t src, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vfloat64m8_t src, size_t vl) { +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vint64m1_t src, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vint64m2_t src, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vint64m4_t src, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vfcvt_f_x_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vint64m8_t src, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vuint64m1_t src, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vuint64m2_t src, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vuint64m4_t src, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vuint64m8_t src, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl, size_t ta) { return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature 
+experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -67,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -86,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -105,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -124,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t 
op1, vfloat64m2_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -143,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -162,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfdiv(op1, op2, vl); } @@ -176,15 +275,121 @@ return vfdiv(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, 
size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m( @@ -192,9 +397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m( @@ -202,10 +406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( @@ -213,9 +415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m( @@ -223,10 +424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m( @@ -234,9 +433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m( @@ -244,10 +442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m( @@ -255,9 +451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m( @@ -265,10 +460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m( @@ -276,9 +469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m( @@ -286,10 +478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m( @@ -297,9 +487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m( @@ -307,10 +496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t 
test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m( @@ -318,9 +505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m( @@ -328,10 +514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m( @@ -339,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m( @@ -349,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m( @@ -360,7 +541,277 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfdiv_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfdiv_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfdiv_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfdiv_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfdiv_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfdiv_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfdiv_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, 
size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfdiv_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfdiv_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfdiv_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfdiv_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfdiv_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfdiv_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfdiv_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_mt( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfdiv_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfdiv_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfdiv_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfdiv_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfdiv_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfdiv_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfdiv_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfdiv_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c @@ -9,49 +9,63 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b1(vbool1_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b2(vbool2_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b4(vbool4_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b8(vbool8_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b16( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b16(vbool16_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b16(vbool16_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b32( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b32(vbool32_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b32(vbool32_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -long test_vfirst_m_b64(vbool64_t op1, size_t vl) { return vfirst(op1, vl); } +long test_vfirst_m_b64(vbool64_t op1, size_t vl) { + return vfirst(op1, vl); +} // CHECK-RV64-LABEL: @test_vfirst_m_b1_m( // CHECK-RV64-NEXT: entry: @@ -115,3 +129,4 @@ long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { return vfirst(mask, op1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c @@ -1,17 +1,124 @@ // NOTE: Assertions 
have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmax(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -67,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -86,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -105,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -124,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -143,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -162,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfmax(op1, op2, vl); } @@ -176,15 +275,121 @@ return vfmax(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, 
vfloat32mf2_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m( @@ -192,9 +397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m( @@ -202,10 +406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m( @@ -213,9 +415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m( @@ -223,10 +424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m( @@ -234,9 +433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m( @@ -244,10 +442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, 
vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m( @@ -255,9 +451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m( @@ -265,10 +460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m( @@ -276,9 +469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m( @@ -286,10 +478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m( @@ -297,9 +487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m( @@ -307,10 +496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m( @@ -318,9 +505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m( @@ -328,10 +514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m( @@ -339,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m( @@ -349,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m( @@ -360,7 +541,277 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, 
size_t vl) { + return vfmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmax_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmax_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmax_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmax_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmax_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmax_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmax_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_mt( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmax_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmax_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmax_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmax_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmax_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmax_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmax_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmax_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmax_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmax_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmax_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmax_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmax_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, 
vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmax_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmax_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmax_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmax_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmax_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmax_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmax_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { + return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vfmax_vf_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) {
+ return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -1,17 +1,124 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret
[[TMP0]] +// +vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmin(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -67,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -86,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -105,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -124,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -143,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -162,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfmin(op1, op2, vl); } @@ -176,15 +275,121 @@ return vfmin(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t 
maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( @@ -192,9 +397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( @@ -202,10 +406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( @@ -213,9 +415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( @@ -223,10 +424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( @@ -234,9 +433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( @@ -244,10 +442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( @@ -255,9 +451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( @@ -265,10 +460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( @@ -276,9 +469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( @@ -286,10 +478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + 
return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( @@ -297,9 +487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( @@ -307,10 +496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( @@ -318,9 +505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( @@ -328,10 +514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( @@ -339,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m( @@ -349,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, 
vfloat64m8_t op2,
- size_t vl) {
- return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
@@ -360,7 +541,277 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) {
+ return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) {
+ return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) {
+ return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) {
+ return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) {
+ return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret
[[TMP0]] +// +vfloat16m1_t test_vfmin_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + 
return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmin_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmin_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmin_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmin_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmin_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmin_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmin_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmin_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmin_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmin_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmin_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmin_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmin_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmin_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmin_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmin_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { + return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -67,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -86,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -105,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -124,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -143,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -162,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfmul(op1, op2, vl); } @@ -176,15 +275,121 @@ return vfmul(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return 
vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( @@ -192,9 +397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( @@ -202,10 +406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( @@ -213,9 +415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( @@ -223,10 
+424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( @@ -234,9 +433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( @@ -244,10 +442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m( @@ -255,9 +451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m( @@ -265,10 +460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m( @@ -276,9 +469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); 
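// Illustrative sketch (not from the patch): with this change the masked vfmul
// overloads accept an optional tail-policy argument. The updated *_m tests in
// this file omit it, and codegen then appends TAIL_AGNOSTIC itself (visible as
// the trailing `i64 1` operand in the CHECK lines), while the new *_mt tests
// added further down pass VE_TAIL_AGNOSTIC explicitly. A minimal sketch of both
// call forms, assuming the overloaded <riscv_vector.h> API exercised by these
// tests; the helper names below are invented for illustration only.

#include <riscv_vector.h>

vfloat32m1_t mul_default_policy(vbool32_t m, vfloat32m1_t off,
                                vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return vfmul(m, off, a, b, vl);                   // policy omitted; codegen supplies tail-agnostic
}

vfloat32m1_t mul_explicit_policy(vbool32_t m, vfloat32m1_t off,
                                 vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return vfmul(m, off, a, b, vl, VE_TAIL_AGNOSTIC); // policy passed explicitly, as in the *_mt tests
}
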
+vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m( @@ -286,10 +478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m( @@ -297,9 +487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m( @@ -307,10 +496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m( @@ -318,9 +505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m( @@ -328,10 +514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m( @@ -339,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m( @@ -349,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m( @@ -360,7 +541,277 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmul_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmul_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmul_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmul_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfmul_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmul_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmul_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmul_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmul_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmul_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmul_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmul_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmul_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmul_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmul_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmul_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { + return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c @@ -1,24 +1,134 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv1f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) { + return vfmv_f(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmv_s_f_f16mf4(vfloat16mf4_t dest, _Float16 src, size_t vl) { + return vfmv_s(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16( [[SRC:%.*]]) +// 
CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) { + return vfmv_f(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmv_s_f_f16mf2(vfloat16mf2_t dest, _Float16 src, size_t vl) { + return vfmv_s(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) { + return vfmv_f(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmv_s_f_f16m1(vfloat16m1_t dest, _Float16 src, size_t vl) { + return vfmv_s(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) { + return vfmv_f(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmv_s_f_f16m2(vfloat16m2_t dest, _Float16 src, size_t vl) { + return vfmv_s(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) { + return vfmv_f(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmv_s_f_f16m4(vfloat16m4_t dest, _Float16 src, size_t vl) { + return vfmv_s(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) { + return vfmv_f(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmv_s_f_f16m8(vfloat16m8_t dest, _Float16 src, size_t vl) { + return vfmv_s(dest, src, vl); +} + // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { return vfmv_f(src); } +float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DST:%.*]], 
float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dest, float src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32( @@ -26,15 +136,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { return vfmv_f(src); } +float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dest, float src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32( @@ -42,15 +154,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { return vfmv_f(src); } +float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dest, float src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32( @@ -58,15 +172,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { return vfmv_f(src); } +float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dest, float src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32( @@ -74,15 +190,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { return vfmv_f(src); } +float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: 
@test_vfmv_s_f_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dest, float src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64( @@ -90,15 +208,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { return vfmv_f(src); } +double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dest, double src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64( @@ -106,15 +226,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { return vfmv_f(src); } +double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dest, double src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64( @@ -122,15 +244,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { return vfmv_f(src); } +double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dest, double src, size_t vl) { + return vfmv_s(dest, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64( @@ -138,13 +262,16 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double 
test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { return vfmv_f(src); } +double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { + return vfmv_f(src); +} // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) { - return vfmv_s(dst, src, vl); +vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dest, double src, size_t vl) { + return vfmv_s(dest, src, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c @@ -1,10 +1,226 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { + return vfncvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { + return vfncvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { + return vfncvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfncvt_x_f_w_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { + return vfncvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { + return vfncvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { + return vfncvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { + return vfncvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { + return vfncvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { + return vfncvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { + return vfncvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { + return vfncvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { + return vfncvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_xu(src, vl); +} + // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -185,6 +401,186 @@ return vfncvt_rtz_xu(src, vl); } +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { + 
return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { + return vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { + return vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { + return vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { + return vfncvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { + return vfncvt_rod_f(src, vl); +} + // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -473,96 +869,301 @@ return vfncvt_rod_f(src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rtz_x_f_w_i16mf4_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, - vint16mf4_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, - vint16mf2_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vfloat32m4_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vfloat32m8_t src, size_t vl) { - return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( @@ -570,441 +1171,1429 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vfloat32m8_t src, size_t vl) { +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfncvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfncvt_f_x_w_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfncvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return vfncvt_rod_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl, size_t ta) { + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_x_f_w_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_x_f_w_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_x_f_w_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_x_f_w_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { + return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, - vuint16mf4_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vfloat32m1_t src, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, - vuint16mf2_t maskedoff, - vfloat32m1_t src, size_t vl) { +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vfloat32m2_t src, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, - vuint16m1_t maskedoff, - vfloat32m2_t src, size_t vl) { +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vfloat32m4_t src, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vfloat32m4_t src, size_t vl) { +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vfloat32m8_t src, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, 
vfloat32m8_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vfloat32m8_t src, size_t vl) { +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_x_w_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_x_w_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_x_w_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl, size_t 
ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_f_f_w_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_f_f_w_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_f_f_w_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl, size_t ta) { + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t 
mask, vint32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, - vint32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, - vuint32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, - vuint32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mt(vbool32_t mask, vuint32m1_t 
maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, - vuint32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vint64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vint64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vint64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vint64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vuint64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vuint64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vuint64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( +// 
CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vuint64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl, size_t ta) { return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, - vfloat32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl, size_t ta) { return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat64m4_t src, 
size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, - vfloat32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl, size_t ta) { return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, - vfloat32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl, size_t ta) { return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c @@ -1,16 +1,70 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfneg_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) { + return vfneg(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { +vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) { return vfneg(op1, vl); } @@ -19,7 +73,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { +vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) { return vfneg(op1, vl); } @@ -28,7 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { +vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) { return vfneg(op1, vl); } @@ -37,7 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { +vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) { return vfneg(op1, vl); } @@ -46,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { +vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) { return vfneg(op1, vl); } @@ -55,7 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { +vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) { return vfneg(op1, vl); } @@ -64,7 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { +vfloat64m2_t 
test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) { return vfneg(op1, vl); } @@ -73,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { +vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) { return vfneg(op1, vl); } @@ -82,17 +136,71 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfneg(op1, vl); } +// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( @@ -100,8 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( @@ -109,8 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( @@ -118,8 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( @@ -127,8 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( @@ -136,8 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( @@ -145,8 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t 
test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( @@ -154,8 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( @@ -163,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfneg_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfneg_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfneg_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfneg_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t 
test_vfneg_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfneg_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfneg_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfneg_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfneg_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfneg_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfneg_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfneg_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfneg_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, size_t ta) { + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfneg_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, size_t ta) { return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -1,10 +1,64 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( [[OP1:%.*]], 
half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrdiv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrdiv(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) @@ -86,14 +140,67 @@ return vfrdiv(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + // 
CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m( @@ -101,9 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m( @@ -111,9 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m( @@ -121,9 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( @@ -131,9 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( @@ -141,9 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return 
vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( @@ -151,9 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( @@ -161,9 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( @@ -171,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfrdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrdiv_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrdiv_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrdiv_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vfrdiv_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrdiv_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrdiv_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrdiv_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrdiv_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrdiv_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrdiv_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrdiv_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrdiv_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrdiv_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrdiv_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrdiv_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c @@ -1,10 +1,64 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) { + return vfrec7(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) @@ -86,14 +140,67 @@ return vfrec7(op1, vl); } +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t 
maskedoff, vfloat16m4_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( @@ -101,9 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( @@ -111,9 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( @@ -121,9 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( @@ -131,9 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( @@ -141,9 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( @@ -151,9 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( @@ -161,9 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( @@ -171,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrec7_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, size_t ta) { return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrec7_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrec7_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrec7_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrec7_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrec7_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrec7_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrec7_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrec7_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrec7_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrec7_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrec7_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrec7_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrec7_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, size_t ta) { + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c @@ -1,10 +1,64 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) @@ -86,14 +140,67 @@ return vfrsqrt7(op1, vl); } +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( @@ -101,9 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( @@ -111,9 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( @@ -121,9 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( @@ -131,9 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( @@ -141,9 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( @@ -151,9 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( @@ -161,9 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( @@ -171,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, size_t ta) { return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsqrt7_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsqrt7_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsqrt7_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsqrt7_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrsqrt7_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrsqrt7_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrsqrt7_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrsqrt7_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrsqrt7_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsqrt7_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsqrt7_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrsqrt7_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, size_t ta) { + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -1,10 +1,64 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrsub(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) @@ -86,14 +140,67 @@ return vfrsub(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) 
{ + return vfrsub(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( @@ -101,9 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( @@ -111,9 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( @@ -121,9 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m( @@ -131,9 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( @@ -141,9 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - 
vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( @@ -151,9 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( @@ -161,9 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( @@ -171,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsub_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsub_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsub_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsub_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsub_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsub_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrsub_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrsub_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrsub_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
<vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsub_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) {
+  return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsub_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) {
+  return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsub_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) {
+  return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsub_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) {
+  return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsub_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) {
+  return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
@@ -1,17 +1,124 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnj(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -67,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -86,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -105,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -124,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -143,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -162,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t 
op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfsgnj(op1, op2, vl); } @@ -176,13 +275,120 @@ return vfsgnj(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -200,8 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -219,8 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -238,8 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -257,8 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -276,8 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -295,8 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, 
vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -314,8 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -333,8 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfsgnjn(op1, op2, vl); } @@ -347,13 +545,120 @@ return vfsgnjn(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -371,8 +676,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -390,8 +694,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -409,8 +712,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -428,8 +730,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, 
vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -447,8 +748,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -466,8 +766,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -485,8 +784,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -504,8 +802,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfsgnjx(op1, op2, vl); } @@ -518,15 +815,121 @@ return vfsgnjx(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return 
vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t 
test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( @@ -534,9 +937,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( @@ -544,10 +946,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( @@ -555,9 +955,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( @@ -565,10 +964,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( @@ -576,9 +973,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( @@ -586,10 +982,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( @@ -597,9 +991,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( @@ -607,10 +1000,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m( @@ -618,9 +1009,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( @@ -628,10 +1018,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t 
op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( @@ -639,9 +1027,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( @@ -649,10 +1036,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( @@ -660,9 +1045,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( @@ -670,10 +1054,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( @@ -681,9 +1063,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( @@ -691,10 +1072,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t 
test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( @@ -702,387 +1081,1357 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnj(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, - size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, 
vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnjn(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, 
vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfsgnjx_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfsgnjx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnj_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnj_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnj_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnj_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfsgnj_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsgnj_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsgnj_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsgnj_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsgnj_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsgnj_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnj_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnj_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return 
vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnj_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnj_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnj_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnj_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vfsgnjn_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnjn_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfsgnjn_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - 
vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnjn_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfsgnjn_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnjn_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfsgnjn_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfsgnjn_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfsgnjn_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfsgnjn_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjn_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfsgnjn_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfsgnjn_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t 
op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, - size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfsgnjx_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfsgnjx_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { return 
vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vfloat64m4_t op1, double op2, size_t vl) {
+vfloat64m4_t test_vfsgnjx_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) {
  return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, vfloat64m8_t op2,
- size_t vl) {
+vfloat64m8_t test_vfsgnjx_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) {
  return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfsgnjx_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) {
  return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -1,17 +1,70 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( [[SRC:%.*]], half
[[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1down(src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, - size_t vl) { +vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, size_t vl) { return vfslide1down(src, value, vl); } @@ -20,8 +73,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, - size_t vl) { +vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, size_t vl) { return vfslide1down(src, value, vl); } @@ -30,8 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, - size_t vl) { +vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, size_t vl) { return vfslide1down(src, value, vl); } @@ -40,8 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, - size_t vl) { +vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, size_t vl) { return vfslide1down(src, value, vl); } @@ -50,8 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, - size_t vl) { +vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, size_t vl) { return vfslide1down(src, value, vl); } @@ -60,8 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, - size_t vl) { +vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, size_t vl) { return vfslide1down(src, value, vl); } @@ -70,8 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, - size_t vl) { +vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, size_t vl) { return vfslide1down(src, value, vl); } @@ -80,8 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, - size_t vl) { +vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, size_t vl) { return vfslide1down(src, value, vl); } @@ -90,21 +136,71 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, - size_t vl) { +vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, size_t vl) { return vfslide1down(src, value, vl); } +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat32mf2_t src, float value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( @@ -112,11 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, - vfloat32m1_t maskedoff, - vfloat32m1_t src, float value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( @@ -124,11 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, - vfloat32m2_t maskedoff, - vfloat32m2_t src, float value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( @@ -136,10 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t src, float value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { + return vfslide1down(mask, 
maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( @@ -147,10 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t src, float value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( @@ -158,11 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, - vfloat64m1_t maskedoff, - vfloat64m1_t src, double value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( @@ -170,11 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, - vfloat64m2_t maskedoff, - vfloat64m2_t src, double value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( @@ -182,11 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, - vfloat64m4_t maskedoff, - vfloat64m4_t src, double value, - size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( @@ -194,8 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t src, double value, - size_t vl) { +vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { + return vfslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1down_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl, size_t ta) { return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1down_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1down_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1down_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1down_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1down_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfslide1down_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfslide1down_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfslide1down_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfslide1down_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfslide1down_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfslide1down_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfslide1down_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl, size_t ta) { + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vfslide1down_vf_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfslide1down_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl, size_t ta) {
+  return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
@@ -1,17 +1,70 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t vl) {
+  return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t vl) {
+  return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t vl) {
+  return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t vl) {
+  return vfslide1up(src, value, vl);
+}
+
// CHECK-RV64-LABEL:
@test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, - size_t vl) { +vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, size_t vl) { return vfslide1up(src, value, vl); } @@ -20,8 +73,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, - size_t vl) { +vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, size_t vl) { return vfslide1up(src, value, vl); } @@ -30,8 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, - size_t vl) { +vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, size_t vl) { return vfslide1up(src, value, vl); } @@ -40,8 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, - size_t vl) { +vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, size_t vl) { return vfslide1up(src, value, vl); } @@ -50,8 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, - size_t vl) { +vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, size_t vl) { return vfslide1up(src, value, vl); } @@ -60,8 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, - size_t vl) { +vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, size_t vl) { return vfslide1up(src, value, vl); } @@ -70,8 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, - size_t vl) { +vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, size_t vl) { return vfslide1up(src, value, vl); } @@ -80,8 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, - size_t vl) { +vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, size_t vl) { return vfslide1up(src, value, vl); } @@ -90,21 +136,71 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, - size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, size_t vl) { return vfslide1up(src, 
value, vl); } +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat32mf2_t src, float value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up(mask, 
maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( @@ -112,10 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t src, float value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( @@ -123,10 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t src, float value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( @@ -134,10 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t src, float value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( @@ -145,10 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t src, float value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( @@ -156,10 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t src, double value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( @@ -167,10 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t src, double value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( @@ -178,10 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t src, double value, - size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( @@ -189,8 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t src, double value, - size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { + return vfslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1up_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl, size_t ta) { return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1up_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1up_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat16m2_t test_vfslide1up_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1up_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1up_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfslide1up_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfslide1up_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfslide1up_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl, size_t ta) { + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfslide1up_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl, size_t ta) {
+  return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfslide1up_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl, size_t ta) {
+  return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfslide1up_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl, size_t ta) {
+  return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfslide1up_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl, size_t ta) {
+  return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfslide1up_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl, size_t ta) {
+  return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
@@ -1,10 +1,64 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
+  return vfsqrt(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) @@ -86,14 +140,67 @@ return vfsqrt(op1, vl); } +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t 
test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( @@ -101,9 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( @@ -111,9 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( @@ -121,9 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( @@ -131,9 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( @@ -141,9 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( @@ -151,9 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( @@ -161,9 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( @@ -171,7 +271,142 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsqrt_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, size_t ta) { return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsqrt_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsqrt_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsqrt_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsqrt_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsqrt_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsqrt_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsqrt_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsqrt_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsqrt_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsqrt_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsqrt_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsqrt_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsqrt_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, size_t ta) { + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsub(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -29,8 +136,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -67,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -86,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -105,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -124,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -143,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -162,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfsub(op1, op2, vl); } @@ -176,15 +275,121 @@ return vfsub(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + 
return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( @@ -192,9 +397,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( @@ -202,10 +406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( @@ -213,9 +415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( @@ -223,10 +424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( @@ -234,9 +433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( @@ -244,10 +442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( @@ -255,9 +451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( @@ -265,10 +460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( @@ -276,9 +469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( @@ -286,10 +478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( @@ -297,9 +487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( @@ -307,10 +496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( @@ -318,9 +505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( @@ -328,10 +514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return 
vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( @@ -339,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( @@ -349,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( @@ -360,7 +541,277 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vfsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsub_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsub_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsub_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vfsub_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsub_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsub_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsub_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsub_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsub_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsub_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsub_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsub_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsub_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsub_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsub_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsub_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfsub_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsub_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsub_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsub_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsub_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsub_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsub_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsub_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsub_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsub_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsub_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsub_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsub_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, size_t ta) { + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -1,17 +1,196 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + 
return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfwadd_vv(op1, op2, vl); } @@ -29,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { return vfwadd_wv(op1, op2, vl); } @@ -48,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfwadd_vv(op1, op2, vl); } @@ -67,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { return vfwadd_wv(op1, op2, vl); } @@ -86,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfwadd_vv(op1, op2, vl); } @@ -105,8 +280,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { return vfwadd_wv(op1, op2, vl); } @@ -124,8 +298,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfwadd_vv(op1, op2, vl); } @@ -143,8 +316,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { return vfwadd_wv(op1, op2, vl); } @@ -157,15 +329,193 @@ return vfwadd_wf(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( @@ -173,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( @@ -183,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t 
test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( @@ -194,9 +541,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( @@ -204,10 +550,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( @@ -215,9 +559,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( @@ -225,10 +568,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat32m1_t op2, - size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( @@ -236,9 +577,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( @@ -246,10 +586,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( @@ -257,9 +595,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( @@ -267,10 +604,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( @@ -278,9 +613,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( @@ -288,10 +622,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( @@ -299,9 +631,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); 
+vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( @@ -309,10 +640,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( @@ -320,7 +649,331 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t 
test_vfwadd_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, 
vfloat32m4_t op2, size_t vl, size_t ta) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl, size_t ta) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c @@ -1,10 +1,298 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { + return vfwcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { + return vfwcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { + return vfwcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { + return vfwcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { + return vfwcvt_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { + return vfwcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { + return vfwcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { + return vfwcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { + return vfwcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { + return vfwcvt_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_xu(src, vl); +} + // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -95,6 +383,51 @@ return vfwcvt_f(src, vl); } +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) { + return vfwcvt_f(src, vl); +} + // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -347,387 +680,1353 @@ return vfwcvt_f(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vint16mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vint16mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vint16m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m1_t 
test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vint16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vint16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vuint16mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vuint16mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, 
vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vuint16m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vuint16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vuint16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, 
vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + 
return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { + return 
vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); 
+} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfwcvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfwcvt_xu(mask, maskedoff, src, vl); +} 
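The masked tests above call the overloaded intrinsics with just (mask, maskedoff, src, vl), while every matching CHECK line still ends in a trailing `i64 1` policy operand. A minimal caller-side sketch of that four-argument form, written as a hypothetical wrapper (not part of the test file) around the same vfwcvt_xu overload used in test_vfwcvt_xu_f_v_u64m8_m:

#include <riscv_vector.h>

// Hypothetical helper, for illustration only: same four-argument masked call
// shape as test_vfwcvt_xu_f_v_u64m8_m above. No tail-policy argument is passed,
// yet per the CHECK lines the emitted intrinsic carries a trailing `i64 1`.
vuint64m8_t widen_f32m4_to_u64m8(vbool8_t mask, vuint64m8_t maskedoff,
                                 vfloat32m4_t src, size_t vl) {
  return vfwcvt_xu(mask, maskedoff, src, vl);
}
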
+ +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwcvt_f_xu_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return vfwcvt_f(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_x_f_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_x_f_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_x_f_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_x_f_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl, size_t ta) { + return 
vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl, size_t ta) { + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + 
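The *_mt tests, by contrast, declare an extra size_t parameter and pass VE_TAIL_AGNOSTIC explicitly as a fifth argument, which shows up as the same trailing `i64 1` operand in their CHECK lines. A sketch of that explicit-policy form, again as a hypothetical wrapper rather than part of the patch (the ta parameter of the generated tests is omitted here):

#include <riscv_vector.h>

// Hypothetical helper, for illustration only: five-argument form mirroring
// test_vfwcvt_x_f_v_i64m1_mt above, with the tail policy passed explicitly.
vint64m1_t widen_f32mf2_to_i64m1(vbool64_t mask, vint64m1_t maskedoff,
                                 vfloat32mf2_t src, size_t vl) {
  return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}
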
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_x_f_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_x_f_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vfloat32m4_t src, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vfloat32m4_t src, size_t vl) { +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfwcvt_rtz_x(mask, maskedoff, src, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, - vuint64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, size_t ta) { return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vfloat32m1_t src, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, - vuint64m2_t maskedoff, - vfloat32m1_t src, size_t vl) { +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vfloat32m2_t src, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t 
test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, - vuint64m4_t maskedoff, - vfloat32m2_t src, size_t vl) { +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vfloat32m4_t src, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vfloat32m4_t src, size_t vl) { +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vint32mf2_t src, size_t vl) { +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vint32m1_t src, size_t vl) { +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vint32m2_t src, size_t vl) { +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_mt( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vint32m4_t src, size_t vl) { +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vuint32mf2_t src, size_t vl) { +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vuint32m1_t src, size_t vl) { +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vuint32m2_t src, size_t vl) { +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vuint32m4_t src, size_t vl) { +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl, 
size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t src, size_t vl) { +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t src, size_t vl) { +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t src, size_t vl) { +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl, size_t ta) { return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c @@ -1,17 +1,106 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfwmul(op1, op2, vl); } @@ -29,8 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, 
vfloat32m1_t op2, size_t vl) { return vfwmul(op1, op2, vl); } @@ -48,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfwmul(op1, op2, vl); } @@ -67,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfwmul(op1, op2, vl); } @@ -81,15 +167,103 @@ return vfwmul(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( @@ -97,9 +271,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( @@ -107,10 +280,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( @@ -118,9 +289,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( @@ -128,10 +298,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( @@ -139,9 +307,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( @@ -149,10 +316,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( @@ -160,7 +325,169 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vfwmul_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c @@ -1,17 +1,196 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfwsub_vv(op1, op2, vl); } @@ -29,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, - size_t vl) { 
+vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { return vfwsub_wv(op1, op2, vl); } @@ -48,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfwsub_vv(op1, op2, vl); } @@ -67,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { return vfwsub_wv(op1, op2, vl); } @@ -86,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfwsub_vv(op1, op2, vl); } @@ -105,8 +280,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { return vfwsub_wv(op1, op2, vl); } @@ -124,8 +298,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfwsub_vv(op1, op2, vl); } @@ -143,8 +316,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { return vfwsub_wv(op1, op2, vl); } @@ -157,15 +329,193 @@ return vfwsub_wf(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, 
_Float16 op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( @@ -173,9 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( @@ -183,10 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( @@ -194,9 +541,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, float op2, size_t vl) { - 
return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( @@ -204,10 +550,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( @@ -215,9 +559,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( @@ -225,10 +568,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat32m1_t op2, - size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( @@ -236,9 +577,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( @@ -246,10 +586,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( @@ -257,9 +595,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( @@ -267,10 +604,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( @@ -278,9 +613,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( @@ -288,10 +622,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( @@ -299,9 +631,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( @@ -309,10 +640,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { - 
return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m( @@ -320,7 +649,331 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwsub_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl, size_t ta) { return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_f32m8_mt(vbool4_t mask, vfloat32m8_t 
maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl, size_t ta) { + 
return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl, size_t ta) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl, size_t ta) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -221,6 +221,60 @@ return vget_u16m4(src, 0); } +// CHECK-RV64-LABEL: @test_vget_v_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vget_v_f16m2_f16m1(vfloat16m2_t src, size_t index) { + return vget_f16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vget_v_f16m4_f16m1(vfloat16m4_t src, size_t index) { + return vget_f16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vget_v_f16m8_f16m1(vfloat16m8_t src, size_t index) { + return vget_f16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vget_v_f16m4_f16m2(vfloat16m4_t src, size_t index) { + return vget_f16m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vget_v_f16m8_f16m2(vfloat16m8_t src, size_t index) { + return vget_f16m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vget_v_f16m8_f16m4(vfloat16m8_t src, size_t index) { + return vget_f16m4(src, 0); +} + // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( @@ -22,7 +22,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( @@ -32,7 +32,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( @@ -42,7 +42,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( @@ -52,7 +52,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( @@ -62,7 +62,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( @@ -82,7 +82,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // 
CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( @@ -92,7 +92,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( @@ -102,7 +102,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( @@ -112,7 +112,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( @@ -122,7 +122,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( @@ -132,7 +132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( @@ -142,7 +142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( @@ -152,7 +152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( @@ -162,7 +162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( @@ -172,7 +172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64_v_i64m2_m(vbool32_t 
mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( @@ -212,7 +212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( @@ -222,7 +222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( @@ -232,7 +232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( @@ -242,7 +242,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( @@ -252,7 +252,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( @@ -262,7 +262,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( @@ -282,7 +282,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( @@ -292,7 +292,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle8(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( @@ -302,7 +302,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( @@ -312,7 +312,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); 
} // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( @@ -322,7 +322,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( @@ -332,7 +332,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( @@ -342,7 +342,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( @@ -352,7 +352,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( @@ -362,7 +362,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( @@ -372,7 +372,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( @@ -382,7 +382,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( @@ -392,7 +392,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( @@ -402,7 +402,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( @@ -412,7 +412,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( @@ -422,7 +422,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( @@ -432,7 +432,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( @@ -442,7 +442,67 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( @@ -452,7 +512,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t 
test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( @@ -462,7 +522,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( @@ -472,7 +532,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( @@ -482,7 +542,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( @@ -492,7 +552,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle32(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( @@ -502,7 +562,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( @@ -512,7 +572,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( @@ -522,7 +582,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); + return vle64(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( @@ -532,5 +592,596 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vle8_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vle8_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, 
size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vle8_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vle8_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vle8_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vle8_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vle8_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vle16_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vle16_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, 
base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vle16_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vle16_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vle16_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vle16_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vle32_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vle64_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vle64_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vle64_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl, size_t ta) { return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vle8_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vle8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vle8_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vle8_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vle8_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vle8_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vle8_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vle8_v_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl, size_t ta) { + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vle16_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vle16_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vle16_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vle16_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vle16_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vle16_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vle32_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vle32_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vle32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vle32_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vle32_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vle32_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vle64_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vle64_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vle64_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vle64_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vle16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vle16_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vle16_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vle16_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vle16_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl, size_t ta) { + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vle32_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vle32_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vle32_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vle32_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vle32_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl, size_t ta) { + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vle64_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vle64_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vle64_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl, size_t ta) { + return vle64(mask, 
maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vle64_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl, size_t ta) { + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -941,6 +941,141 @@ return vlmul_ext_u64m8(op1); } +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) { + return vlmul_ext_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) { + return vlmul_ext_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) { + return vlmul_ext_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) { + return vlmul_ext_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) { + return vlmul_ext_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) { + return vlmul_ext_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) { + return vlmul_ext_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) { + return vlmul_ext_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) { + return vlmul_ext_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) { + return vlmul_ext_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) { + return vlmul_ext_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) { + return vlmul_ext_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) { + return vlmul_ext_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) { + return vlmul_ext_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) { + return vlmul_ext_f16m8(op1); +} + // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) @@ -2021,6 +2156,141 @@ return vlmul_trunc_u64m4(op1); } +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { + return vlmul_trunc_f16mf4(op1); +} + +// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_f16m1_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { + return vlmul_trunc_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { + return vlmul_trunc_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { + return vlmul_trunc_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { + return vlmul_trunc_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { + return vlmul_trunc_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { + return vlmul_trunc_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { + return vlmul_trunc_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { + return vlmul_trunc_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { + return vlmul_trunc_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { + return vlmul_trunc_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv32f16( 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { + return vlmul_trunc_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { + return vlmul_trunc_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { + return vlmul_trunc_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { + return vlmul_trunc_f16m4(op1); +} + // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) @@ -2164,3 +2434,4 @@ vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { return vlmul_trunc_f64m4(op1); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -1202,7 +1202,6 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - // return vloxei64(base, bindex, vl); } @@ -1566,6 +1565,216 @@ return vloxei64(base, bindex, vl); } +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1923,7 +2132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( @@ -1933,7 +2142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( @@ -1943,7 +2152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( @@ -1953,7 +2162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( @@ -1963,7 +2172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( @@ -1973,7 +2182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( @@ -1983,7 +2192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return 
vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( @@ -1993,7 +2202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( @@ -2003,7 +2212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( @@ -2013,7 +2222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( @@ -2023,7 +2232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( @@ -2033,7 +2242,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( @@ -2043,7 +2252,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( @@ -2053,7 +2262,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( @@ -2063,7 +2272,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( @@ -2073,7 +2282,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( @@ -2083,7 +2292,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( @@ -2093,7 +2302,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( @@ -2103,7 +2312,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( @@ -2113,7 +2322,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( @@ -2123,7 +2332,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( @@ -2133,7 +2342,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( @@ -2143,7 +2352,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( @@ -2153,7 +2362,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( @@ -2163,7 +2372,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( @@ -2173,7 +2382,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( @@ -2183,7 +2392,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, 
base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( @@ -2193,7 +2402,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( @@ -2203,7 +2412,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( @@ -2213,7 +2422,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( @@ -2223,7 +2432,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( @@ -2233,7 +2442,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( @@ -2243,7 +2452,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( @@ -2253,7 +2462,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( @@ -2263,7 +2472,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( @@ -2273,7 +2482,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( @@ -2283,7 +2492,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t 
bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( @@ -2293,7 +2502,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( @@ -2303,7 +2512,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( @@ -2313,7 +2522,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( @@ -2323,7 +2532,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( @@ -2333,7 +2542,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( @@ -2343,7 +2552,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( @@ -2353,7 +2562,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( @@ -2363,7 +2572,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( @@ -2373,7 +2582,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( @@ -2383,7 +2592,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t 
maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( @@ -2393,7 +2602,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( @@ -2403,7 +2612,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( @@ -2413,7 +2622,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( @@ -2423,7 +2632,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( @@ -2433,7 +2642,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( @@ -2443,7 +2652,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( @@ -2453,7 +2662,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( @@ -2463,7 +2672,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( @@ -2473,7 +2682,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( @@ -2483,7 +2692,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t 
test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( @@ -2493,7 +2702,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( @@ -2503,7 +2712,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( @@ -2513,7 +2722,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( @@ -2523,7 +2732,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( @@ -2533,7 +2742,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( @@ -2543,7 +2752,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( @@ -2553,7 +2762,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( @@ -2563,7 +2772,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( @@ -2573,7 +2782,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( @@ -2583,7 +2792,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( @@ -2593,7 +2802,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( @@ -2603,7 +2812,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( @@ -2613,7 +2822,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( @@ -2623,7 +2832,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( @@ -2633,7 +2842,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( @@ -2643,7 +2852,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( @@ -2653,7 +2862,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( @@ -2663,7 +2872,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( @@ -2673,7 +2882,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei64_v_i64m4_m( @@ -2683,7 +2892,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( @@ -2693,7 +2902,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( @@ -2703,7 +2912,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( @@ -2713,7 +2922,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( @@ -2723,7 +2932,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( @@ -2733,7 +2942,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( @@ -2743,7 +2952,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( @@ -2753,7 +2962,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( @@ -2763,7 +2972,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( @@ -2773,7 +2982,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( @@ -2783,7 +2992,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( @@ -2793,7 +3002,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( @@ -2803,7 +3012,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( @@ -2813,7 +3022,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( @@ -2823,7 +3032,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( @@ -2833,7 +3042,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( @@ -2843,7 +3052,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( @@ -2853,7 +3062,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( @@ -2863,7 +3072,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( @@ -2873,7 +3082,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return 
vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( @@ -2883,7 +3092,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( @@ -2893,7 +3102,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( @@ -2903,7 +3112,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( @@ -2913,7 +3122,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( @@ -2923,7 +3132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( @@ -2933,7 +3142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( @@ -2943,7 +3152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( @@ -2953,7 +3162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( @@ -2963,7 +3172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( @@ -2973,7 +3182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, 
base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( @@ -2983,7 +3192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( @@ -2993,7 +3202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( @@ -3003,7 +3212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( @@ -3013,7 +3222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( @@ -3023,7 +3232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( @@ -3033,7 +3242,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( @@ -3043,7 +3252,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( @@ -3053,7 +3262,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( @@ -3063,7 +3272,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( @@ -3073,7 +3282,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, 
const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( @@ -3083,7 +3292,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( @@ -3093,7 +3302,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( @@ -3103,7 +3312,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( @@ -3113,7 +3322,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( @@ -3123,7 +3332,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( @@ -3133,7 +3342,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( @@ -3143,7 +3352,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( @@ -3153,7 +3362,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( @@ -3163,7 +3372,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( @@ -3173,7 +3382,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( @@ -3183,7 +3392,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( @@ -3193,7 +3402,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( @@ -3203,7 +3412,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( @@ -3213,7 +3422,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( @@ -3223,7 +3432,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( @@ -3233,7 +3442,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( @@ -3243,7 +3452,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( @@ -3253,7 +3462,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( @@ -3263,7 +3472,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei32_v_u32m8_m( @@ -3273,7 +3482,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( @@ -3283,7 +3492,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( @@ -3293,7 +3502,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( @@ -3303,7 +3512,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( @@ -3313,7 +3522,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( @@ -3323,7 +3532,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( @@ -3333,7 +3542,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( @@ -3343,7 +3552,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( @@ -3353,7 +3562,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( @@ -3363,7 +3572,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return 
vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( @@ -3373,7 +3582,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( @@ -3383,7 +3592,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( @@ -3393,7 +3602,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( @@ -3403,7 +3612,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( @@ -3413,7 +3622,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( @@ -3423,7 +3632,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( @@ -3433,7 +3642,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( @@ -3443,7 +3652,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( @@ -3453,7 +3662,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( @@ -3463,7 +3672,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - 
return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( @@ -3473,7 +3682,217 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( @@ -3483,7 +3902,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( @@ -3493,7 +3912,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( @@ -3503,7 +3922,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( @@ -3513,7 +3932,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( @@ -3523,7 +3942,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( @@ -3533,7 +3952,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( @@ -3543,7 +3962,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t 
test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( @@ -3553,7 +3972,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( @@ -3563,7 +3982,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( @@ -3573,7 +3992,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( @@ -3583,7 +4002,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( @@ -3593,7 +4012,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( @@ -3603,7 +4022,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( @@ -3613,7 +4032,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( @@ -3623,7 +4042,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( @@ -3633,7 +4052,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( 
@@ -3643,7 +4062,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( @@ -3653,7 +4072,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( @@ -3663,7 +4082,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( @@ -3673,7 +4092,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( @@ -3683,7 +4102,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( @@ -3693,7 +4112,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( @@ -3703,7 +4122,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( @@ -3713,7 +4132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( @@ -3723,7 +4142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( @@ -3733,7 +4152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( @@ -3743,7 +4162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( @@ -3753,7 +4172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( @@ -3763,7 +4182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( @@ -3773,7 +4192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( @@ -3783,7 +4202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( @@ -3793,7 +4212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( @@ -3803,7 +4222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m( @@ -3813,7 +4232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vloxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m( @@ -3823,6 +4242,2126 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); -} + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret
[[TMP1]] +// +vint8mf8_t test_vloxei8_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei8_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei8_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei8_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei8_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vloxei8_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vloxei8_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, 
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei16_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei16_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei16_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei16_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei16_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vloxei16_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] 
to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei32_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei32_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei32_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei32_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei32_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei64_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei64_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei64_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei64_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei8_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei8_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei8_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei8_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, 
vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei8_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vloxei8_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei16_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei16_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei16_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei16_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei16_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vloxei16_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei32_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei32_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei32_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei32_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei32_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei64_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei64_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei64_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei64_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei8_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei8_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei8_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei8_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei8_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei16_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei16_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei16_v_i32m2_mt(vbool16_t mask, 
vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei16_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei16_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei32_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei32_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei32_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei32_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, 
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei32_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei64_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei64_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei64_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei64_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei8_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei8_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei8_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei8_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei16_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei16_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei16_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei16_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei32_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei32_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei32_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei32_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei64_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei64_v_i64m2_mt(vbool32_t 
mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei64_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei64_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei8_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei8_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei8_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei8_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vloxei8_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vloxei8_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vloxei8_v_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei16_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei16_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei16_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] 
to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei16_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vloxei16_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vloxei16_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei32_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei32_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei32_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei32_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vloxei32_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei64_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei64_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei64_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei64_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei8_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const 
uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei8_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei8_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei8_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vloxei8_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vloxei8_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei16_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + 
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei16_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei16_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei16_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vloxei16_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vloxei16_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei32_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei32_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei32_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei32_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vloxei32_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei64_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei64_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei64_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei64_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei8_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei8_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei8_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei8_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei8_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei16_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei16_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei16_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei16_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei16_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei32_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const 
uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei32_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei32_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei32_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei32_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei64_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei64_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei64_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei64_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei8_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei8_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei8_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei8_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei16_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei16_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei16_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei16_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei32_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei32_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei32_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei32_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei64_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei64_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei64_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei64_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vfloat16mf4_t test_vloxei8_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei8_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei8_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei8_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei8_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei8_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei16_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t 
bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei16_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei16_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei16_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei16_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei32_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei32_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei32_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei32_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei64_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei64_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei64_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei64_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei8_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei8_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei8_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei8_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei8_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei16_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei16_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei16_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei16_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei16_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei32_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei32_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei32_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei32_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei32_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei64_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t 
test_vloxei64_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei64_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei64_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei8_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei8_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei8_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei8_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl, size_t ta) 
{ + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei16_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei16_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei16_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei16_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei32_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei32_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vloxei32_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei32_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei32_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei64_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei64_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei64_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei64_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -32,7 +32,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -49,7 +49,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -68,7 +68,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -89,7 +89,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -112,7 +112,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, 
const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -201,7 +201,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -222,7 +222,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return 
vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -298,7 +298,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -315,7 +315,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -334,7 +334,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -355,7 +355,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -378,7 +378,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, 
vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -431,7 +431,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -448,7 +448,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -467,7 +467,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -511,7 +511,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -536,7 +536,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -549,7 +549,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m2 (vint8m2_t 
*v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -564,7 +564,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -581,7 +581,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -594,7 +594,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -607,7 +607,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -622,7 +622,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -639,7 +639,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -658,7 +658,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -679,7 +679,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t 
bindex, size_t vl) { +void test_vloxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -702,7 +702,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -727,7 +727,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -755,7 +755,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -772,7 +772,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -791,7 +791,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -812,7 +812,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void 
test_vloxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -835,7 +835,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -888,7 +888,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -905,7 +905,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -924,7 +924,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -945,7 +945,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void 
test_vloxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -968,7 +968,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1006,7 +1006,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -1021,7 +1021,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -1038,7 +1038,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -1057,7 +1057,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1078,7 +1078,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1101,7 +1101,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1139,7 +1139,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -1154,7 +1154,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -1184,7 +1184,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -1197,7 +1197,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -1212,7 +1212,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1229,7 +1229,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1248,7 +1248,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1269,7 +1269,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1292,7 +1292,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1317,7 +1317,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -1345,7 +1345,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t 
*v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1362,7 +1362,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1381,7 +1381,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1402,7 +1402,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1425,7 +1425,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1463,7 +1463,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -1478,7 +1478,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, 
vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1514,7 +1514,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1535,7 +1535,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1583,7 +1583,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1596,7 +1596,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -1611,7 +1611,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { 
+void test_vloxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1628,7 +1628,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1647,7 +1647,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1668,7 +1668,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1691,7 +1691,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1716,7 +1716,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1729,7 +1729,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -1744,7 +1744,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t 
bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1761,7 +1761,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1774,7 +1774,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -1789,7 +1789,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -1806,7 +1806,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -1825,7 +1825,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1846,7 +1846,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1869,7 +1869,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1894,7 +1894,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1907,7 +1907,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -1922,7 +1922,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -1939,7 +1939,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -1958,7 +1958,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1979,7 +1979,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2002,7 +2002,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2027,7 +2027,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2040,7 +2040,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -2055,7 +2055,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -2072,7 +2072,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -2091,7 +2091,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2112,7 +2112,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2135,7 +2135,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2160,7 +2160,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf2 
(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2173,7 +2173,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -2188,7 +2188,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -2205,7 +2205,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -2224,7 +2224,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2245,7 +2245,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2268,7 +2268,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2293,7 +2293,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, 
vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2306,7 +2306,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -2321,7 +2321,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2338,7 +2338,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2357,7 +2357,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2378,7 +2378,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2401,7 +2401,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2426,7 +2426,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, 
vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2439,7 +2439,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -2454,7 +2454,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2471,7 +2471,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2490,7 +2490,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2511,7 +2511,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2534,7 +2534,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2559,7 +2559,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf2 (vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2572,7 +2572,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -2587,7 +2587,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2604,7 +2604,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2623,7 +2623,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2644,7 +2644,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2667,7 +2667,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2692,7 +2692,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2705,7 +2705,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -2720,7 +2720,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2737,7 +2737,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2750,7 +2750,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -2763,7 +2763,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -2778,7 +2778,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -2795,7 +2795,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -2814,7 +2814,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2835,7 +2835,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2858,7 +2858,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2883,7 +2883,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2896,7 +2896,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -2911,7 +2911,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -2928,7 +2928,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -2947,7 +2947,7 
@@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2968,7 +2968,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2991,7 +2991,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3016,7 +3016,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3029,7 +3029,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -3044,7 +3044,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -3061,7 +3061,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) { return 
vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -3080,7 +3080,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3101,7 +3101,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3124,7 +3124,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3149,7 +3149,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3162,7 +3162,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -3177,7 +3177,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -3194,7 +3194,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) { return 
vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -3207,7 +3207,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -3220,7 +3220,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -3235,7 +3235,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3252,7 +3252,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3271,7 +3271,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3292,7 +3292,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3315,7 +3315,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3340,7 +3340,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: 
ret void // -void test_vloxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3353,7 +3353,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -3368,7 +3368,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3385,7 +3385,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3404,7 +3404,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3425,7 +3425,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3448,7 +3448,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3473,7 +3473,7 @@ // 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3486,7 +3486,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -3501,7 +3501,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3518,7 +3518,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3537,7 +3537,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3558,7 +3558,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3581,7 +3581,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3606,7 +3606,7 
@@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3619,7 +3619,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -3634,7 +3634,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3651,7 +3651,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3664,7 +3664,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -3677,7 +3677,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -3692,7 +3692,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -3709,7 +3709,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, 
size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -3728,7 +3728,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3749,7 +3749,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3772,7 +3772,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3797,7 +3797,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3810,7 +3810,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -3825,7 +3825,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -3842,7 +3842,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -3861,7 +3861,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3882,7 +3882,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3905,7 +3905,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3930,7 +3930,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3943,7 +3943,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -3958,7 +3958,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -3975,7 +3975,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void 
test_vloxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -3994,7 +3994,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4015,7 +4015,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4038,7 +4038,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4063,7 +4063,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4076,7 +4076,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -4091,7 +4091,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -4108,7 +4108,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) { +void 
test_vloxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -4121,7 +4121,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -4136,7 +4136,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -4153,7 +4153,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -4172,7 +4172,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4193,7 +4193,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4216,7 +4216,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4241,7 +4241,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void 
test_vloxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4254,7 +4254,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -4269,7 +4269,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -4286,7 +4286,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -4305,7 +4305,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4326,7 +4326,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4349,7 +4349,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4374,7 +4374,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void 
test_vloxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4387,7 +4387,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -4402,7 +4402,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -4419,7 +4419,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -4432,7 +4432,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -4445,7 +4445,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -4460,7 +4460,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -4477,7 +4477,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -4496,7 +4496,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, 
vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4517,7 +4517,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4540,7 +4540,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4565,7 +4565,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4578,7 +4578,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -4593,7 +4593,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -4610,7 +4610,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -4629,7 +4629,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t 
*v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4650,7 +4650,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4673,7 +4673,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4698,7 +4698,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4711,7 +4711,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -4726,7 +4726,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -4743,7 +4743,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -4756,7 +4756,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t 
*base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -4769,7 +4769,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -4784,7 +4784,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -4801,7 +4801,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -4820,7 +4820,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4841,7 +4841,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4864,7 +4864,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4889,7 +4889,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { 
+void test_vloxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4902,7 +4902,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -4917,7 +4917,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -4934,7 +4934,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -4953,7 +4953,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4974,7 +4974,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4997,7 +4997,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5022,7 +5022,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) { 
+void test_vloxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5035,7 +5035,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -5050,7 +5050,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -5067,7 +5067,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -5080,7 +5080,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -5093,7 +5093,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -5108,7 +5108,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -5125,7 +5125,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -5144,7 +5144,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const 
int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5165,7 +5165,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5188,7 +5188,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5213,7 +5213,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5226,7 +5226,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -5241,7 +5241,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -5258,7 +5258,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -5277,7 +5277,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t 
*v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5298,7 +5298,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5321,7 +5321,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5346,7 +5346,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5359,7 +5359,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -5374,7 +5374,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -5391,7 +5391,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -5404,7 +5404,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, 
vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -5417,7 +5417,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -5432,7 +5432,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -5449,7 +5449,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -5468,7 +5468,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5489,7 +5489,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5512,7 +5512,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5537,7 +5537,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, 
vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5550,7 +5550,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -5565,7 +5565,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -5582,7 +5582,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -5595,7 +5595,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -5608,7 +5608,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -5623,7 +5623,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -5640,7 +5640,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -5659,7 +5659,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5680,7 +5680,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5703,7 +5703,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5728,7 +5728,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5741,7 +5741,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -5756,7 +5756,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -5773,7 +5773,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -5786,7 +5786,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, 
v1, base, bindex, vl); } @@ -5799,7 +5799,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -5814,7 +5814,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -5831,7 +5831,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -5850,7 +5850,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5871,7 +5871,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5894,7 +5894,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5919,7 +5919,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, 
v6, v7, base, bindex, vl); } @@ -5932,7 +5932,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -5947,7 +5947,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -5964,7 +5964,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -5977,7 +5977,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -5990,7 +5990,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -6005,7 +6005,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -6022,7 +6022,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -6041,7 +6041,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6062,7 
+6062,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6085,7 +6085,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6110,7 +6110,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6123,7 +6123,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -6138,7 +6138,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -6155,7 +6155,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -6168,7 +6168,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -6181,7 +6181,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -6196,7 +6196,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6213,7 +6213,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6232,7 +6232,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6253,7 +6253,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6276,7 +6276,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6301,7 +6301,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6314,7 +6314,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 
// CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -6329,7 +6329,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6346,7 +6346,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6365,7 +6365,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6386,7 +6386,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6409,7 +6409,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6434,7 +6434,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6447,7 +6447,7 @@ // CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -6462,7 +6462,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6479,7 +6479,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6498,7 +6498,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6519,7 +6519,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6542,7 +6542,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6567,7 +6567,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ 
-6580,7 +6580,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -6595,7 +6595,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6612,7 +6612,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6631,7 +6631,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6652,7 +6652,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6675,7 +6675,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6700,7 +6700,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6713,7 +6713,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -6728,7 +6728,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6745,7 +6745,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6758,7 +6758,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -6771,7 +6771,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -6786,7 +6786,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -6803,7 +6803,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -6822,7 +6822,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6843,7 +6843,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6866,7 +6866,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6891,7 +6891,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6904,7 +6904,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -6919,7 +6919,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -6936,7 +6936,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -6955,7 +6955,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6976,7 
+6976,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6999,7 +6999,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7024,7 +7024,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7037,7 +7037,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -7052,7 +7052,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -7069,7 +7069,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -7088,7 +7088,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return 
vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7109,7 +7109,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7132,7 +7132,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7157,7 +7157,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7170,7 +7170,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -7185,7 +7185,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -7202,7 +7202,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -7221,7 +7221,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t 
bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7242,7 +7242,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7265,7 +7265,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7290,7 +7290,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7303,7 +7303,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -7318,7 +7318,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -7335,7 +7335,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -7348,7 +7348,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -7361,7 +7361,7 @@ // CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -7376,7 +7376,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7393,7 +7393,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7412,7 +7412,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7433,7 +7433,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7456,7 +7456,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7481,7 +7481,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, 
v7, base, bindex, vl); } @@ -7494,7 +7494,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -7509,7 +7509,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7526,7 +7526,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7545,7 +7545,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7566,7 +7566,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7589,7 +7589,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7614,7 +7614,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, 
size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7627,7 +7627,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -7642,7 +7642,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7659,7 +7659,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7678,7 +7678,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7699,7 +7699,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7722,7 +7722,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7747,7 +7747,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, 
vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7760,7 +7760,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -7775,7 +7775,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7792,7 +7792,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7811,7 +7811,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7832,7 +7832,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7855,7 +7855,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7880,7 +7880,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const 
uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7893,7 +7893,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -7908,7 +7908,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7925,7 +7925,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7938,7 +7938,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -7953,7 +7953,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -7970,7 +7970,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -7989,7 +7989,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8010,7 +8010,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { 
+void test_vloxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8033,7 +8033,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8058,7 +8058,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8071,7 +8071,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -8086,7 +8086,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -8103,7 +8103,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -8122,7 +8122,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8143,7 +8143,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t 
*v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8166,7 +8166,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8191,7 +8191,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8204,7 +8204,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -8219,7 +8219,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -8236,7 +8236,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -8255,7 +8255,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8276,7 +8276,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8299,7 +8299,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8324,7 +8324,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8337,7 +8337,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -8352,7 +8352,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -8369,7 +8369,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -8388,7 +8388,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8409,7 +8409,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8m1 (vuint8m1_t 
*v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8432,7 +8432,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8457,7 +8457,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8470,7 +8470,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -8485,7 +8485,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8502,7 +8502,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8521,7 +8521,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8542,7 +8542,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8565,7 +8565,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8590,7 +8590,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8603,7 +8603,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -8618,7 +8618,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8635,7 +8635,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8654,7 +8654,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, 
v4, base, bindex, vl); } @@ -8675,7 +8675,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8698,7 +8698,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8723,7 +8723,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8736,7 +8736,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -8751,7 +8751,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8768,7 +8768,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8787,7 +8787,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8808,7 +8808,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8831,7 +8831,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8856,7 +8856,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8869,7 +8869,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -8884,7 +8884,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8901,7 +8901,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8914,7 +8914,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, 
size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -8927,7 +8927,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -8942,7 +8942,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -8959,7 +8959,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -8978,7 +8978,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8999,7 +8999,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9022,7 +9022,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9047,7 +9047,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9060,7 +9060,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -9075,7 +9075,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -9092,7 +9092,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -9111,7 +9111,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9132,7 +9132,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9155,7 +9155,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9180,7 +9180,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, 
vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9193,7 +9193,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -9208,7 +9208,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -9225,7 +9225,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -9244,7 +9244,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9265,7 +9265,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9288,7 +9288,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9313,7 +9313,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16m1 
(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9326,7 +9326,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -9341,7 +9341,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -9358,7 +9358,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -9371,7 +9371,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -9384,7 +9384,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -9399,7 +9399,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9416,7 +9416,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, 
base, bindex, vl); } @@ -9435,7 +9435,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9456,7 +9456,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9479,7 +9479,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9504,7 +9504,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9517,7 +9517,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -9532,7 +9532,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9549,7 +9549,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void 
test_vloxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9568,7 +9568,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9589,7 +9589,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9612,7 +9612,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9637,7 +9637,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9650,7 +9650,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -9665,7 +9665,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9682,7 +9682,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9701,7 +9701,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9722,7 +9722,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9745,7 +9745,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9770,7 +9770,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9783,7 +9783,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -9798,7 +9798,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9815,7 +9815,7 @@ // CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9828,7 +9828,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -9841,7 +9841,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -9856,7 +9856,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -9873,7 +9873,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -9892,7 +9892,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9913,7 +9913,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9936,7 +9936,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, 
vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9961,7 +9961,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9974,7 +9974,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -9989,7 +9989,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -10006,7 +10006,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -10025,7 +10025,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10046,7 +10046,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10069,7 +10069,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10094,7 +10094,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10107,7 +10107,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -10122,7 +10122,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -10139,7 +10139,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -10158,7 +10158,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10179,7 +10179,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, 
v2, v3, v4, v5, base, bindex, vl); } @@ -10202,7 +10202,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10227,7 +10227,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10240,7 +10240,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -10255,7 +10255,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -10272,7 +10272,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -10285,7 +10285,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -10300,7 +10300,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -10317,7 +10317,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -10336,7 +10336,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10357,7 +10357,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10380,7 +10380,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10405,7 +10405,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10418,7 +10418,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -10433,7 +10433,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { 
return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -10450,7 +10450,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -10469,7 +10469,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10490,7 +10490,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10513,7 +10513,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10538,7 +10538,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10551,7 +10551,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -10566,7 +10566,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, 
vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -10583,7 +10583,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -10596,7 +10596,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -10609,7 +10609,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -10624,7 +10624,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -10641,7 +10641,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -10660,7 +10660,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10681,7 +10681,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10704,7 +10704,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 
4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10729,7 +10729,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10742,7 +10742,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -10757,7 +10757,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -10774,7 +10774,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -10793,7 +10793,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10814,7 +10814,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return 
vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10837,7 +10837,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10862,7 +10862,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10875,7 +10875,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -10890,7 +10890,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -10907,7 +10907,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -10920,7 +10920,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -10933,7 +10933,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -10948,7 +10948,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void 
// -void test_vloxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -10965,7 +10965,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -10984,7 +10984,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11005,7 +11005,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11028,7 +11028,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11053,7 +11053,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11066,7 +11066,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t 
bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -11081,7 +11081,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -11098,7 +11098,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -11117,7 +11117,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11138,7 +11138,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11161,7 +11161,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11186,7 +11186,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11199,7 +11199,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +void 
test_vloxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -11214,7 +11214,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -11231,7 +11231,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -11244,7 +11244,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -11257,7 +11257,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -11272,7 +11272,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -11289,7 +11289,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -11308,7 +11308,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11329,7 +11329,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11352,7 +11352,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11377,7 +11377,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11390,7 +11390,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -11405,7 +11405,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -11422,7 +11422,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -11441,7 +11441,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11462,7 +11462,7 @@ // 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11485,7 +11485,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11510,7 +11510,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11523,7 +11523,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -11538,7 +11538,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -11555,7 +11555,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -11568,7 +11568,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -11581,7 +11581,7 @@ // CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -11596,7 +11596,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -11613,7 +11613,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -11632,7 +11632,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11653,7 +11653,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11676,7 +11676,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11701,7 +11701,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, 
bindex, vl); } @@ -11714,7 +11714,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -11729,7 +11729,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -11746,7 +11746,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -11759,7 +11759,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -11772,7 +11772,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -11787,7 +11787,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -11804,7 +11804,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -11823,7 +11823,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return 
vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11844,7 +11844,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11867,7 +11867,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11892,7 +11892,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11905,7 +11905,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -11920,7 +11920,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -11937,7 +11937,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -11950,7 +11950,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { return 
vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -11963,7 +11963,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -11978,7 +11978,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -11995,7 +11995,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -12014,7 +12014,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12035,7 +12035,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12058,7 +12058,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12083,7 +12083,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, 
vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12096,7 +12096,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -12111,7 +12111,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -12128,7 +12128,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -12141,7 +12141,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -12154,7 +12154,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -12169,7 +12169,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -12186,7 +12186,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -12205,7 +12205,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_u64m1(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12226,7 +12226,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12249,7 +12249,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12274,7 +12274,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12287,7 +12287,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -12302,7 +12302,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -12319,7 +12319,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -12332,7 +12332,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { +void 
test_vloxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -12345,7 +12345,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -12360,7 +12360,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12377,7 +12377,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12396,7 +12396,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12417,7 +12417,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12440,7 +12440,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12465,7 +12465,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, 
vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12478,7 +12478,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -12493,7 +12493,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12510,7 +12510,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12529,7 +12529,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12550,7 +12550,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12573,7 +12573,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12598,7 +12598,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12611,7 +12611,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -12626,7 +12626,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12643,7 +12643,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12662,7 +12662,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12683,7 +12683,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12706,7 +12706,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, 
vuint8mf2_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12731,7 +12731,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12744,7 +12744,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -12759,7 +12759,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12776,7 +12776,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12789,7 +12789,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -12802,7 +12802,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -12817,7 +12817,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -12834,7 +12834,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -12853,7 +12853,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12874,7 +12874,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12897,7 +12897,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12922,7 +12922,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12935,7 +12935,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -12950,7 +12950,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t 
bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -12967,7 +12967,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -12986,7 +12986,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13007,7 +13007,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13030,7 +13030,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13055,7 +13055,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13068,7 +13068,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -13083,7 +13083,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -13100,7 +13100,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -13119,7 +13119,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13140,7 +13140,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13163,7 +13163,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13188,7 +13188,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13201,7 +13201,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { return 
vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -13216,7 +13216,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -13233,7 +13233,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -13246,7 +13246,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -13259,7 +13259,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -13274,7 +13274,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13291,7 +13291,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13310,7 +13310,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13331,7 +13331,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t 
*v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13354,7 +13354,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13379,7 +13379,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13392,7 +13392,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -13407,7 +13407,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13424,7 +13424,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13443,7 +13443,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, 
vl); } @@ -13464,7 +13464,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13487,7 +13487,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13512,7 +13512,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13525,7 +13525,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -13540,7 +13540,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13557,7 +13557,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13576,7 +13576,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void 
test_vloxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13597,7 +13597,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13620,7 +13620,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13645,7 +13645,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13658,7 +13658,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -13673,7 +13673,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13690,7 +13690,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13703,7 +13703,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -13716,7 +13716,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -13731,7 +13731,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -13748,7 +13748,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -13767,7 +13767,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13788,7 +13788,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13811,7 +13811,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13836,7 +13836,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13849,7 +13849,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -13864,7 +13864,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -13881,7 +13881,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -13900,7 +13900,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13921,7 +13921,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13944,7 +13944,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, 
const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13969,7 +13969,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13982,7 +13982,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -13997,7 +13997,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -14014,7 +14014,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -14033,7 +14033,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14054,7 +14054,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14077,7 +14077,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t 
bindex, size_t vl) { +void test_vloxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14102,7 +14102,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14115,7 +14115,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -14130,7 +14130,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -14147,7 +14147,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -14160,7 +14160,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -14175,7 +14175,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -14192,7 +14192,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float 
*base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -14211,7 +14211,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14232,7 +14232,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14255,7 +14255,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14280,7 +14280,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14293,7 +14293,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -14308,7 +14308,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -14325,7 +14325,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float 
*base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -14344,7 +14344,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14365,7 +14365,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14388,7 +14388,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14413,7 +14413,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14426,7 +14426,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -14441,7 +14441,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -14458,7 +14458,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -14471,7 +14471,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -14484,7 +14484,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -14499,7 +14499,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -14516,7 +14516,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -14535,7 +14535,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14556,7 +14556,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14579,7 +14579,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t 
bindex, size_t vl) { +void test_vloxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14604,7 +14604,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14617,7 +14617,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -14632,7 +14632,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -14649,7 +14649,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -14668,7 +14668,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14689,7 +14689,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14712,7 +14712,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32m1 
(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14737,7 +14737,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14750,7 +14750,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -14765,7 +14765,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -14782,7 +14782,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -14795,7 +14795,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -14808,7 +14808,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -14823,7 +14823,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { +void 
test_vloxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -14840,7 +14840,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -14859,7 +14859,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14880,7 +14880,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14903,7 +14903,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14928,7 +14928,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14941,7 +14941,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -14956,7 +14956,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -14973,7 +14973,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -14992,7 +14992,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15013,7 +15013,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15036,7 +15036,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15061,7 +15061,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15074,7 +15074,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t 
vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -15089,7 +15089,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -15106,7 +15106,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -15119,7 +15119,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -15132,7 +15132,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -15147,7 +15147,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -15164,7 +15164,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -15183,7 +15183,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15204,7 +15204,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float 
*base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15227,7 +15227,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15252,7 +15252,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15265,7 +15265,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -15280,7 +15280,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -15297,7 +15297,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -15316,7 +15316,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15337,7 +15337,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void 
// -void test_vloxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15360,7 +15360,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15385,7 +15385,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15398,7 +15398,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -15413,7 +15413,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -15430,7 +15430,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -15443,7 +15443,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -15456,7 +15456,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -15471,7 +15471,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -15488,7 +15488,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -15507,7 +15507,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15528,7 +15528,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15551,7 +15551,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15576,7 +15576,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ 
-15589,7 +15589,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -15604,7 +15604,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -15621,7 +15621,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -15634,7 +15634,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } @@ -15647,7 +15647,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -15662,7 +15662,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -15679,7 +15679,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -15698,7 +15698,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { return 
vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15719,7 +15719,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15742,7 +15742,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15767,7 +15767,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15780,7 +15780,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -15795,7 +15795,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { return vloxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -15812,7 +15812,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { +void test_vloxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -15825,7 +15825,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { +void test_vloxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, 
vuint16m1_t bindex, size_t vl) { return vloxseg2ei16(v0, v1, base, bindex, vl); } @@ -15838,7 +15838,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -15853,7 +15853,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -15870,7 +15870,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -15889,7 +15889,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15910,7 +15910,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15933,7 +15933,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15958,7 +15958,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vloxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15971,7 +15971,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -15986,7 +15986,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -16003,7 +16003,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { +void test_vloxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -16016,7 +16016,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { +void test_vloxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { return vloxseg2ei32(v0, v1, base, bindex, vl); } @@ -16029,7 +16029,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -16044,7 +16044,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -16061,7 +16061,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -16080,7 +16080,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, 
const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -16101,7 +16101,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -16124,7 +16124,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -16149,7 +16149,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vloxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -16162,7 +16162,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } @@ -16177,7 +16177,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -16194,7 +16194,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -16207,10 +16207,16213 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: 
ret void // -void test_vloxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t 
maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); 
+} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { + 
return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, 
vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, 
vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t 
maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); 
+} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, 
vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + 
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, 
vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t 
maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, 
vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return 
vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, 
vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg6ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, 
vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+//
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m( +// 
CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:
store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t 
bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], 
* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, 
vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, 
vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t 
*v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, 
vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t 
*base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t 
*v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg7ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, 
vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return 
vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const 
uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, 
vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t 
maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, 
size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, 
vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg6ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg8ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, 
const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, 
vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, 
const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg6ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t 
*v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, 
const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, 
vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return 
vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, 
vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, 
vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, 
vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, 
vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return 
vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return 
vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, 
vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t 
maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL:
@test_vloxseg5ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, 
vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, 
vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t 
maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, 
vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return 
vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t 
maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t 
maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]],
i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { + 
return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, 
vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t 
*v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, 
vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { + return 
vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { + 
return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// 
+void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { + return 
vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -16220,7 +32423,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16235,7 +32438,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16252,7 +32455,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16271,7 +32474,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16292,7 +32495,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16315,7 +32518,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16340,7 +32543,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16353,7 +32556,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-16368,7 +32571,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16385,7 +32588,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16404,7 +32607,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16425,7 +32628,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16448,7 +32651,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, 
vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16473,7 +32676,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16486,7 +32689,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16501,7 +32704,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16518,7 +32721,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t 
maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16537,7 +32740,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16558,7 +32761,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16581,7 +32784,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16606,7 +32809,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg8ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16619,7 +32822,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16634,7 +32837,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16651,7 +32854,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16670,7 +32873,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16691,7 +32894,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16714,7 +32917,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16739,7 +32942,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16752,7 +32955,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16767,7 +32970,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16784,7 +32987,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16797,7 +33000,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16810,7 +33013,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16825,7 +33028,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16842,7 +33045,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16861,7 +33064,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16882,7 +33085,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16905,7 +33108,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, 
vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16930,7 +33133,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16943,7 +33146,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16958,7 +33161,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16975,7 +33178,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) 
{ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16994,7 +33197,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17015,7 +33218,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17038,7 +33241,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17063,7 +33266,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t 
*base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17076,7 +33279,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17091,7 +33294,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17108,7 +33311,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17127,7 +33330,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17148,7 +33351,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17171,7 +33374,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17196,7 +33399,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17209,7 +33412,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, 
vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17224,7 +33427,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17241,7 +33444,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17260,7 +33463,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17281,7 +33484,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl, VE_TAIL_AGNOSTIC); } @@ -17304,7 +33507,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17329,7 +33532,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17342,7 +33545,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17357,7 +33560,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17374,7 +33577,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: 
ret void // -void test_vloxseg4ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17387,7 +33590,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17400,7 +33603,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17415,7 +33618,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17432,7 +33635,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17451,7 +33654,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, 
vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17472,7 +33675,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17495,7 +33698,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17520,7 +33723,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, 
vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17533,7 +33736,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17548,7 +33751,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17565,7 +33768,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17584,7 +33787,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17605,7 +33808,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, 
vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17628,7 +33831,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17653,7 +33856,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17666,7 +33869,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-17681,7 +33884,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17698,7 +33901,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17717,7 +33920,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17738,7 +33941,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17761,7 +33964,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, 
vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17786,7 +33989,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17799,7 +34002,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17814,7 +34017,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17831,7 +34034,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, 
vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17850,7 +34053,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17871,7 +34074,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17894,7 +34097,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17919,7 +34122,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17932,7 +34135,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17947,7 +34150,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17964,7 +34167,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17977,7 +34180,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-17992,7 +34195,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18009,7 +34212,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18028,7 +34231,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18049,7 +34252,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18072,7 +34275,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, 
vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18097,7 +34300,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18110,7 +34313,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18125,7 +34328,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18142,7 +34345,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18161,7 +34364,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18182,7 +34385,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18205,7 +34408,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18230,7 +34433,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 
// CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18243,7 +34446,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18258,7 +34461,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18275,7 +34478,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18294,7 +34497,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t 
bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18315,7 +34518,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18338,7 +34541,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18363,7 +34566,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18376,7 +34579,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18391,7 +34594,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18408,7 +34611,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18427,7 +34630,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18448,7 +34651,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18471,7 +34674,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18496,7 +34699,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18509,7 +34712,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18524,7 +34727,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg3ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18541,7 +34744,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18560,7 +34763,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18581,7 +34784,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18604,7 +34807,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t 
vl, uint8_t ta) { +void test_vloxseg7ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18629,7 +34832,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18642,7 +34845,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18657,7 +34860,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18674,7 +34877,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i16mf2_mt(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18693,7 +34896,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18714,7 +34917,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18737,7 +34940,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18762,7 +34965,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t 
*v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18775,7 +34978,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18790,7 +34993,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18807,7 +35010,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18826,7 +35029,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i16m1_mt(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18847,7 +35050,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18870,7 +35073,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18895,7 +35098,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18908,7 +35111,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18923,7 +35126,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18940,7 +35143,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18953,7 +35156,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18966,7 +35169,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18981,7 +35184,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, 
vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18998,7 +35201,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19017,7 +35220,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19038,7 +35241,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19061,7 +35264,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19086,7 +35289,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19099,7 +35302,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19114,7 +35317,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19131,7 +35334,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, 
vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19150,7 +35353,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19171,7 +35374,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19194,7 +35397,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -19219,7 +35422,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19232,7 +35435,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19247,7 +35450,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19264,7 +35467,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19283,7 +35486,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, 
vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19304,7 +35507,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19327,7 +35530,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19352,7 +35555,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, 
vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19365,7 +35568,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19380,7 +35583,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19397,7 +35600,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19410,7 +35613,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19423,7 +35626,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19438,7 +35641,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19455,7 +35658,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19474,7 +35677,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19495,7 +35698,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19518,7 +35721,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19543,7 +35746,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19556,7 +35759,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19571,7 +35774,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -19588,7 +35791,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19607,7 +35810,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19628,7 +35831,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19651,7 +35854,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, 
vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19676,7 +35879,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19689,7 +35892,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19704,7 +35907,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19721,7 +35924,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -19740,7 +35943,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19761,7 +35964,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19784,7 +35987,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19809,7 +36012,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i16m1_mt(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19822,7 +36025,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19837,7 +36040,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19854,7 +36057,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19867,7 +36070,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19880,7 +36083,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, 
uint8_t ta) { +void test_vloxseg2ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19895,7 +36098,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19912,7 +36115,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19931,7 +36134,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19952,7 +36155,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, 
size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19975,7 +36178,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20000,7 +36203,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20013,7 +36216,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20028,7 +36231,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20045,7 +36248,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20064,7 +36267,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20085,7 +36288,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20108,7 +36311,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20133,7 +36336,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20146,7 +36349,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20161,7 +36364,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20178,7 +36381,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20197,7 +36400,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20218,7 +36421,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20241,7 +36444,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20266,7 +36469,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t 
maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20279,7 +36482,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20294,7 +36497,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20311,7 +36514,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20324,7 +36527,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20339,7 +36542,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20356,7 +36559,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20375,7 +36578,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20396,7 +36599,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20419,7 +36622,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t 
*v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20444,7 +36647,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20457,7 +36660,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20472,7 +36675,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20489,7 +36692,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, 
vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20508,7 +36711,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20529,7 +36732,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20552,7 +36755,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20577,7 +36780,7 @@ // 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20590,7 +36793,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20605,7 +36808,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20622,7 +36825,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20635,7 +36838,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg2ei8_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20648,7 +36851,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20663,7 +36866,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20680,7 +36883,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20699,7 +36902,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20720,7 +36923,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t 
*v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20743,7 +36946,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20768,7 +36971,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20781,7 +36984,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t 
bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20796,7 +36999,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20813,7 +37016,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20832,7 +37035,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20853,7 +37056,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20876,7 +37079,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20901,7 +37104,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20914,7 +37117,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20929,7 +37132,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20946,7 +37149,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20959,7 +37162,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20972,7 +37175,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20987,7 +37190,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21004,7 +37207,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21023,7 +37226,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg5ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21044,7 +37247,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21067,7 +37270,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21092,7 +37295,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, 
vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21105,7 +37308,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21120,7 +37323,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21137,7 +37340,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21156,7 +37359,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21177,7 +37380,7 @@ // 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21200,7 +37403,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21225,7 +37428,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21238,7 +37441,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i32m2_mt(vint32m2_t *v0, 
vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21253,7 +37456,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21270,7 +37473,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21283,7 +37486,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21296,7 +37499,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21311,7 +37514,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21328,7 +37531,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21347,7 +37550,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21368,7 +37571,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21391,7 +37594,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t 
maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21416,7 +37619,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21429,7 +37632,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21444,7 +37647,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21461,7 +37664,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21480,7 +37683,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21501,7 +37704,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21524,7 +37727,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21549,7 +37752,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, 
uint8_t ta) { +void test_vloxseg8ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21562,7 +37765,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21577,7 +37780,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21594,7 +37797,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21607,7 +37810,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21620,7 +37823,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21635,7 +37838,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21652,7 +37855,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21671,7 +37874,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21692,7 +37895,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return 
vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21715,7 +37918,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21740,7 +37943,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21753,7 +37956,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21768,7 +37971,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t 
ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21785,7 +37988,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21798,7 +38001,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21811,7 +38014,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21826,7 +38029,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21843,7 +38046,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21862,7 +38065,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21883,7 +38086,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21906,7 +38109,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21931,7 +38134,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg8ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21944,7 +38147,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21959,7 +38162,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21976,7 +38179,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21989,7 +38192,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22002,7 +38205,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22017,7 +38220,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22034,7 +38237,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22053,7 +38256,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22074,7 +38277,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { 
return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22097,7 +38300,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22122,7 +38325,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22135,7 +38338,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22150,7 +38353,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t 
bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22167,7 +38370,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22180,7 +38383,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22193,7 +38396,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22208,7 +38411,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22225,7 +38428,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22244,7 +38447,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22265,7 +38468,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22288,7 +38491,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22313,7 +38516,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { 
+void test_vloxseg8ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22326,7 +38529,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22341,7 +38544,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22358,7 +38561,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22371,7 +38574,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22384,7 +38587,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22399,7 +38602,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22416,7 +38619,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22435,7 +38638,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22456,7 +38659,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t 
*base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22479,7 +38682,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22504,7 +38707,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22517,7 +38720,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22532,7 +38735,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22549,7 +38752,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22568,7 +38771,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22589,7 +38792,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22612,7 +38815,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22637,7 +38840,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22650,7 +38853,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22665,7 +38868,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22682,7 +38885,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, 
vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22701,7 +38904,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22722,7 +38925,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22745,7 +38948,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22770,7 +38973,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t 
mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22783,7 +38986,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22798,7 +39001,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22815,7 +39018,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22834,7 +39037,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, 
vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22855,7 +39058,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22878,7 +39081,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22903,7 +39106,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22916,7 +39119,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22931,7 +39134,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22948,7 +39151,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22961,7 +39164,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22974,7 +39177,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22989,7 +39192,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg3ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23006,7 +39209,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23025,7 +39228,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23046,7 +39249,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23069,7 +39272,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t 
bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23094,7 +39297,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23107,7 +39310,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23122,7 +39325,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23139,7 +39342,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg4ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23158,7 +39361,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23179,7 +39382,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23202,7 +39405,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23227,7 +39430,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23240,7 +39443,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23255,7 +39458,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23272,7 +39475,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23291,7 +39494,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23312,7 +39515,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23335,7 +39538,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23360,7 +39563,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, 
vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23373,7 +39576,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23388,7 +39591,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23405,7 +39608,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23424,7 +39627,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23445,7 +39648,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t 
maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23468,7 +39671,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23493,7 +39696,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23506,7 +39709,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23521,7 +39724,7 @@ // CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23538,7 +39741,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23551,7 +39754,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23564,7 +39767,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23579,7 +39782,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23596,7 +39799,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23615,7 +39818,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23636,7 +39839,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23659,7 +39862,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23684,7 +39887,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23697,7 +39900,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23712,7 +39915,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23729,7 +39932,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23748,7 +39951,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf4_mt (vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23769,7 +39972,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23792,7 +39995,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23817,7 +40020,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, 
vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23830,7 +40033,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23845,7 +40048,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23862,7 +40065,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23881,7 +40084,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23902,7 +40105,7 @@ // CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23925,7 +40128,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23950,7 +40153,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23963,7 +40166,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t 
ta) { +void test_vloxseg2ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23978,7 +40181,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23995,7 +40198,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24014,7 +40217,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24035,7 +40238,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24058,7 +40261,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24083,7 +40286,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24096,7 +40299,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24111,7 +40314,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24128,7 +40331,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24141,7 +40344,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24156,7 +40359,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24173,7 +40376,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24192,7 +40395,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t 
maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24213,7 +40416,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24236,7 +40439,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24261,7 +40464,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, 
base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24274,7 +40477,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24289,7 +40492,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24306,7 +40509,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24325,7 +40528,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24346,7 +40549,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg6ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24369,7 +40572,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24394,7 +40597,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24407,7 +40610,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24422,7 +40625,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24439,7 +40642,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24458,7 +40661,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24479,7 +40682,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24502,7 +40705,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, 
vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24527,7 +40730,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24540,7 +40743,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24555,7 +40758,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24572,7 +40775,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t 
mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24591,7 +40794,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24612,7 +40815,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24635,7 +40838,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24660,7 +40863,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24673,7 +40876,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24688,7 +40891,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24705,7 +40908,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24724,7 +40927,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24745,7 +40948,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24768,7 +40971,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24793,7 +40996,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t 
mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24806,7 +41009,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24821,7 +41024,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24838,7 +41041,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24857,7 +41060,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-24878,7 +41081,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24901,7 +41104,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24926,7 +41129,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24939,7 +41142,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m1_mt (vuint16m1_t *v0, 
vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24954,7 +41157,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24971,7 +41174,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24990,7 +41193,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25011,7 +41214,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t 
maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) {
  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25034,7 +41237,7 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg7ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) {
  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25059,7 +41262,7 @@
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg8ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) {
  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25072,7 +41275,7 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg2ei8_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) {
  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25087,7 +41290,7 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg3ei8_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) {
  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25104,7 +41307,7 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg4ei8_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) {
  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25117,7 +41320,7 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg2ei8_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) {
  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25130,7 +41333,7 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg2ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25145,7 +41348,7 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vloxseg3ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

@@ -25162,7 +41365,7 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void
test_vloxseg4ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25181,7 +41384,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25202,7 +41405,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25225,7 +41428,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25250,7 +41453,7 @@ // CHECK-RV64-NEXT: 
store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25263,7 +41466,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25278,7 +41481,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25295,7 +41498,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25314,7 +41517,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25335,7 +41538,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25358,7 +41561,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25383,7 +41586,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t 
*v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25396,7 +41599,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25411,7 +41614,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25428,7 +41631,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25447,7 +41650,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25468,7 +41671,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25491,7 +41694,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25516,7 +41719,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25529,7 +41732,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m2_mt (vuint16m2_t *v0, 
vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25544,7 +41747,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25561,7 +41764,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25574,7 +41777,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25587,7 +41790,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25602,7 +41805,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { 
+void test_vloxseg3ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25619,7 +41822,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25638,7 +41841,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25659,7 +41862,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25682,7 +41885,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t 
maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25707,7 +41910,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25720,7 +41923,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25735,7 +41938,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25752,7 +41955,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, 
vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25771,7 +41974,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25792,7 +41995,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25815,7 +42018,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25840,7 +42043,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25853,7 +42056,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25868,7 +42071,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25885,7 +42088,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25904,7 +42107,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25925,7 +42128,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25948,7 +42151,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25973,7 +42176,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u16m1_mt(vuint16m1_t 
*v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25986,7 +42189,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26001,7 +42204,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26018,7 +42221,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26031,7 +42234,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26044,7 +42247,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26059,7 +42262,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26076,7 +42279,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26095,7 +42298,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26116,7 +42319,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26139,7 +42342,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26164,7 +42367,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26177,7 +42380,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26192,7 +42395,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t 
mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26209,7 +42412,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26228,7 +42431,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26249,7 +42452,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26272,7 +42475,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, 
vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26297,7 +42500,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26310,7 +42513,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26325,7 +42528,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26342,7 +42545,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26361,7 +42564,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26382,7 +42585,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26405,7 +42608,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26430,7 +42633,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26443,7 +42646,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26458,7 +42661,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26475,7 +42678,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26488,7 +42691,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26503,7 +42706,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26520,7 +42723,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26539,7 +42742,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26560,7 +42763,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u32mf2_mt(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26583,7 +42786,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26608,7 +42811,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26621,7 +42824,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26636,7 +42839,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg3ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26653,7 +42856,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26672,7 +42875,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26693,7 +42896,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26716,7 +42919,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t 
*v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26741,7 +42944,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26754,7 +42957,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26769,7 +42972,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26786,7 +42989,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, 
vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26799,7 +43002,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26812,7 +43015,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26827,7 +43030,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26844,7 +43047,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26863,7 +43066,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32mf2_mt 
(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26884,7 +43087,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26907,7 +43110,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26932,7 +43135,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg8ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26945,7 +43148,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26960,7 +43163,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26977,7 +43180,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26996,7 +43199,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return 
vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27017,7 +43220,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27040,7 +43243,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27065,7 +43268,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27078,7 +43281,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg2ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27093,7 +43296,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27110,7 +43313,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27123,7 +43326,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27136,7 +43339,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27151,7 +43354,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const 
uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27168,7 +43371,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27187,7 +43390,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27208,7 +43411,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27231,7 +43434,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, 
vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27256,7 +43459,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27269,7 +43472,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27284,7 +43487,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27301,7 +43504,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27320,7 +43523,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27341,7 +43544,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27364,7 +43567,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27389,7 +43592,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27402,7 +43605,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27417,7 +43620,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27434,7 +43637,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27447,7 +43650,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m4_mt (vuint32m4_t 
*v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27460,7 +43663,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27475,7 +43678,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27492,7 +43695,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27511,7 +43714,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27532,7 
+43735,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27555,7 +43758,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27580,7 +43783,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27593,7 +43796,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m1_mt (vuint32m1_t *v0, 
vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27608,7 +43811,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27625,7 +43828,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27644,7 +43847,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27665,7 +43868,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, 
vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27688,7 +43891,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27713,7 +43916,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27726,7 +43929,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27741,7 +43944,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, 
vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27758,7 +43961,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27771,7 +43974,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27784,7 +43987,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27799,7 +44002,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27816,7 +44019,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg4ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27835,7 +44038,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27856,7 +44059,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27879,7 +44082,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27904,7 +44107,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u64m1_mt 
(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27917,7 +44120,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27932,7 +44135,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27949,7 +44152,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27962,7 +44165,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, 
vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27975,7 +44178,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27990,7 +44193,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28007,7 +44210,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28026,7 +44229,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28047,7 +44250,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t 
maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28070,7 +44273,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28095,7 +44298,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28108,7 +44311,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return 
vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28123,7 +44326,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28140,7 +44343,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28153,7 +44356,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28166,7 +44369,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28181,7 +44384,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-28198,7 +44401,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28217,7 +44420,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28238,7 +44441,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28261,7 +44464,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, 
vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28286,7 +44489,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28299,7 +44502,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28314,7 +44517,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28331,7 +44534,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28344,7 +44547,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28357,7 +44560,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28372,7 +44575,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28389,7 +44592,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28408,7 +44611,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { 
return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28429,7 +44632,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28452,7 +44655,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28477,7 +44680,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28490,7 +44693,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret 
void // -void test_vloxseg2ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28505,7 +44708,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28522,7 +44725,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28535,7 +44738,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28548,7 +44751,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28563,7 +44766,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t 
maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28580,7 +44783,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28599,7 +44802,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28620,7 +44823,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28643,7 +44846,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t 
*v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28668,7 +44871,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28681,7 +44884,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28696,7 +44899,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28713,7 +44916,7 @@ // CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28732,7 +44935,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28753,7 +44956,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28776,7 +44979,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28801,7 +45004,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28814,7 +45017,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28829,7 +45032,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28846,7 +45049,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t 
maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28865,7 +45068,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28886,7 +45089,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28909,7 +45112,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28934,7 +45137,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28947,7 +45150,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28962,7 +45165,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28979,7 +45182,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28992,7 +45195,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, 
vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29005,7 +45208,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29020,7 +45223,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29037,7 +45240,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29056,7 +45259,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29077,7 +45280,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t 
*v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29100,7 +45303,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29125,7 +45328,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29138,7 +45341,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29153,7 +45356,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29170,7 +45373,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29189,7 +45392,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29210,7 +45413,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29233,7 +45436,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29258,7 +45461,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29271,7 +45474,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29286,7 +45489,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29303,7 +45506,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29322,7 +45525,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29343,7 +45546,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29366,7 +45569,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg7ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29391,7 +45594,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29404,7 +45607,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29419,7 +45622,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -29436,7 +45639,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29449,7 +45652,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29462,7 +45665,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29477,7 +45680,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29494,7 +45697,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return 
vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29513,7 +45716,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29534,7 +45737,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29557,7 +45760,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29582,7 +45785,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, 
vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29595,7 +45798,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29610,7 +45813,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29627,7 +45830,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29646,7 +45849,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, 
vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29667,7 +45870,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29690,7 +45893,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29715,7 +45918,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29728,7 +45931,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29743,7 +45946,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29760,7 +45963,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29779,7 +45982,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29800,7 +46003,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29823,7 +46026,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29848,7 +46051,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29861,7 +46064,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29876,7 +46079,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29893,7 +46096,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29906,7 +46109,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29919,7 +46122,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29934,7 +46137,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, 
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29951,7 +46154,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29970,7 +46173,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29991,7 +46194,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30014,7 +46217,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t 
*v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30039,7 +46242,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30052,7 +46255,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30067,7 +46270,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30084,7 +46287,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30103,7 +46306,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30124,7 +46327,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30147,7 +46350,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30172,7 +46375,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30185,7 +46388,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30200,7 +46403,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30217,7 +46420,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t 
*v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30236,7 +46439,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30257,7 +46460,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30280,7 +46483,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30305,7 +46508,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30318,7 +46521,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30333,7 +46536,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30350,7 +46553,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30363,7 +46566,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t 
bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30378,7 +46581,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30395,7 +46598,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30414,7 +46617,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30435,7 +46638,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30458,7 +46661,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30483,7 +46686,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30496,7 +46699,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30511,7 +46714,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30528,7 +46731,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30547,7 +46750,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30568,7 +46771,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30591,7 +46794,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30616,7 +46819,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30629,7 +46832,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30644,7 +46847,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30661,7 +46864,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t 
*v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30674,7 +46877,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30687,7 +46890,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30702,7 +46905,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30719,7 +46922,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30738,7 +46941,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg5ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30759,7 +46962,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30782,7 +46985,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30807,7 +47010,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, 
const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30820,7 +47023,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30835,7 +47038,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30852,7 +47055,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30871,7 +47074,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, 
vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30892,7 +47095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30915,7 +47118,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30940,7 +47143,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30953,7 +47156,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30968,7 +47171,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30985,7 +47188,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30998,7 +47201,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31011,7 +47214,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31026,7 +47229,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31043,7 +47246,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31062,7 +47265,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31083,7 +47286,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31106,7 +47309,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 
4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31131,7 +47334,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31144,7 +47347,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31159,7 +47362,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { 
return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31176,7 +47379,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31195,7 +47398,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31216,7 +47419,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31239,7 +47442,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, 
vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31264,7 +47467,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31277,7 +47480,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31292,7 +47495,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31309,7 +47512,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, 
vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31322,7 +47525,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31335,7 +47538,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31350,7 +47553,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31367,7 +47570,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31386,7 +47589,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg5ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31407,7 +47610,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31430,7 +47633,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31455,7 +47658,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, 
vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31468,7 +47671,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31483,7 +47686,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31500,7 +47703,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31519,7 +47722,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31540,7 +47743,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: 
ret void // -void test_vloxseg6ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31563,7 +47766,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31588,7 +47791,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31601,7 +47804,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t 
bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31616,7 +47819,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31633,7 +47836,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31646,7 +47849,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31659,7 +47862,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31674,7 +47877,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31691,7 +47894,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31710,7 +47913,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31731,7 +47934,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31754,7 +47957,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg7ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31779,7 +47982,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31792,7 +47995,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31807,7 +48010,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei8_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31824,7 +48027,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg4ei8_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31837,7 +48040,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei8_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31850,7 +48053,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31865,7 +48068,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31882,7 +48085,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31901,7 +48104,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t 
maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31922,7 +48125,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31945,7 +48148,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31970,7 +48173,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, 
vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31983,7 +48186,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31998,7 +48201,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei16_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32015,7 +48218,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei16_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32028,7 +48231,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei16_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32041,7 +48244,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t 
mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32056,7 +48259,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32073,7 +48276,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32092,7 +48295,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32113,7 +48316,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return 
vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32136,7 +48339,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32161,7 +48364,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32174,7 +48377,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32189,7 +48392,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei32_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t 
*v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32206,7 +48409,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei32_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32219,7 +48422,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei32_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32232,7 +48435,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32247,7 +48450,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32264,7 +48467,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32283,7 +48486,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg5ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32304,7 +48507,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg6ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32327,7 +48530,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg7ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32352,7 +48555,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1_mt 
(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg8ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32365,7 +48568,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg2ei64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32380,7 +48583,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg3ei64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32397,7 +48600,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vloxseg4ei64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32410,7 +48613,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void 
test_vloxseg2ei64_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -11,632 +11,1177 @@ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t 
*base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlse8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlse8(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const 
uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const 
float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { + return vlse32(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { + return vlse64(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vlse8_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8mf4_t test_vlse8_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8mf2_t test_vlse8_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m1_t test_vlse8_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m2_t test_vlse8_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, 
ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m4_t test_vlse8_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m8_t test_vlse8_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16mf4_t test_vlse16_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16mf2_t test_vlse16_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m1_t test_vlse16_v_i16m1_mt(vbool16_t mask, 
vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m2_t test_vlse16_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m4_t test_vlse16_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( +// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m8_t test_vlse16_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32mf2_t test_vlse32_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - 
size_t vl) { +vint32m1_t test_vlse32_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m2_t test_vlse32_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m4_t test_vlse32_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m8_t test_vlse32_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m1_t test_vlse64_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, 
vint64m2_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m2_t test_vlse64_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m4_t test_vlse64_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m8_t test_vlse64_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8mf8_t test_vlse8_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8mf4_t test_vlse8_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // 
-vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8mf2_t test_vlse8_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m1_t test_vlse8_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m2_t test_vlse8_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m4_t test_vlse8_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( +// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m8_t test_vlse8_v_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP1]] // -vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16mf4_t test_vlse16_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16mf2_t test_vlse16_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m1_t test_vlse16_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m2_t test_vlse16_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m4_t test_vlse16_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( +// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m8_t test_vlse16_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32mf2_t test_vlse32_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m1_t test_vlse32_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m2_t test_vlse32_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m4_t test_vlse32_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m8_t test_vlse32_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m1_t test_vlse64_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m2_t test_vlse64_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m4_t test_vlse64_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m8_t test_vlse64_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( +// CHECK-RV64-LABEL: 
@test_vlse16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vlse16_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vlse16_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vlse16_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vlse16_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vlse16_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vlse16_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32mf2_t test_vlse32_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m1_t test_vlse32_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m2_t test_vlse32_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m4_t test_vlse32_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m8_t test_vlse32_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-                                   const double *base, ptrdiff_t bstride,
-                                   size_t vl) {
+vfloat64m1_t test_vlse64_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) {
   return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
-                                   const double *base, ptrdiff_t bstride,
-                                   size_t vl) {
+vfloat64m2_t test_vlse64_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) {
   return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_mt(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
-                                   const double *base, ptrdiff_t bstride,
-                                   size_t vl) {
+vfloat64m4_t test_vlse64_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) {
   return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_mt(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-                                   const double *base, ptrdiff_t bstride,
-                                   size_t vl) {
+vfloat64m8_t test_vlse64_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) {
   return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
@@ -8,7 +8,7 @@
 #include
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_mt(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
@@ -17,11 +17,11 @@
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 //
CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -32,11 +32,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -49,11 +49,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -68,11 +68,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -89,11 +89,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -112,11 +112,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t 
maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -137,11 +137,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -150,11 +150,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -165,11 +165,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -182,11 +182,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -201,11 +201,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -222,11 
+222,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -245,11 +245,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -270,11 +270,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -283,11 +283,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -298,11 +298,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -315,11 +315,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, 
vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -334,11 +334,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -355,11 +355,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: 
@test_vlseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -378,11 +378,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -403,11 +403,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -416,11 +416,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -431,11 +431,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -448,11 +448,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -467,11 +467,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void 
// -void test_vlseg5e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -488,11 +488,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -511,11 +511,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t 
maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -536,11 +536,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -549,11 +549,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -564,11 +564,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t 
*v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -581,11 +581,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -594,11 +594,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -607,11 +607,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t 
*base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -622,11 +622,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -639,11 +639,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -658,11 +658,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, 
vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -679,11 +679,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -702,11 +702,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: 
@test_vlseg8e16_v_i16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -727,11 +727,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -740,11 +740,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -755,11 +755,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -772,11 +772,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -791,11 +791,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -812,11 +812,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -835,11 +835,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -860,11 +860,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t 
maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -873,11 +873,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -888,11 +888,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -905,11 +905,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m1_mt 
(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -924,11 +924,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -945,11 +945,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); 
} -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -968,11 +968,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -993,11 +993,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1006,11 +1006,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1021,11 +1021,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1038,11 +1038,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 @@ -1051,11 +1051,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1064,11 +1064,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1079,11 +1079,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1096,11 +1096,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -1115,11 +1115,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1136,11 +1136,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -1159,11 +1159,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -1184,11 +1184,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1197,11 +1197,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1212,11 +1212,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1229,11 +1229,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -1248,11 +1248,11 @@ // 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1269,11 +1269,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -1292,11 +1292,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); 
+void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -1317,11 +1317,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1330,11 +1330,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1345,11 +1345,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1362,11 +1362,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1375,11 +1375,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1388,11 +1388,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m1_mt 
(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1403,11 +1403,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1420,11 +1420,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -1439,11 +1439,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t 
maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1460,11 +1460,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -1483,11 +1483,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, 
vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -1508,11 +1508,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1521,11 +1521,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1536,11 +1536,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, 
vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1553,11 +1553,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1566,11 +1566,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1579,11 +1579,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1594,11 +1594,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1611,11 +1611,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -1630,11 +1630,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t 
*v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1651,11 +1651,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -1674,11 +1674,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// 
CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -1699,11 +1699,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1712,11 +1712,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1727,11 +1727,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1744,11 +1744,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -1763,11 +1763,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1784,11 +1784,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -1807,11 +1807,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -1832,11 +1832,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t 
maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1845,11 +1845,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1860,11 +1860,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -1877,11 +1877,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -1896,11 +1896,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -1917,11 +1917,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } 
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -1940,11 +1940,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -1965,11 +1965,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_mt( +// CHECK-RV64-LABEL: 
@test_vlseg2e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -1978,11 +1978,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -1993,11 +1993,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2010,11 +2010,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -2029,11 +2029,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -2050,11 +2050,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -2073,11 +2073,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -2098,11 +2098,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2111,11 +2111,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2126,11 +2126,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2143,11 +2143,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2156,11 +2156,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2169,11 +2169,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg2e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2184,11 +2184,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2201,11 +2201,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -2220,11 +2220,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -2241,11 +2241,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -2264,11 +2264,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void 
test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -2289,11 +2289,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2302,11 +2302,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2317,11 +2317,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2334,11 +2334,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -2353,11 +2353,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { + return 
vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -2374,11 +2374,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -2397,11 +2397,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -2422,11 +2422,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2435,11 +2435,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2450,11 +2450,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, 
vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2467,11 +2467,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -2486,11 +2486,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -2507,11 +2507,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, 
vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -2530,11 +2530,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -2555,11 +2555,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const 
uint16_t *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2568,11 +2568,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2583,11 +2583,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2600,11 +2600,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, 
vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2613,11 +2613,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2626,11 +2626,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2641,11 +2641,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, 
vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2658,11 +2658,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -2677,11 +2677,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -2698,11 +2698,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t 
*v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -2721,11 +2721,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -2746,11 +2746,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t 
maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2759,11 +2759,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2774,11 +2774,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2791,11 +2791,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m1_mt 
(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -2810,11 +2810,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -2831,11 +2831,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -2854,11 +2854,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -2879,11 +2879,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); 
} -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2892,11 +2892,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2907,11 +2907,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2924,11 +2924,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2937,11 +2937,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -2950,11 +2950,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -2965,11 +2965,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -2982,11 +2982,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3001,11 +3001,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3022,11 +3022,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); 
} -// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -3045,11 +3045,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -3070,11 +3070,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_mt( +// 
CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3083,11 +3083,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3098,11 +3098,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3115,11 +3115,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3128,11 +3128,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3141,11 +3141,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3156,11 +3156,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3173,11 +3173,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3192,11 +3192,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3213,11 +3213,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { + return 
vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -3236,11 +3236,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -3261,11 +3261,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 
*base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3274,11 +3274,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3289,11 +3289,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3306,11 +3306,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { + return 
vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3325,11 +3325,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3346,11 +3346,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -3369,11 +3369,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -3394,11 +3394,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 @@ -3407,11 +3407,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3422,11 +3422,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3439,11 +3439,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3458,11 +3458,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3479,11 +3479,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -3502,11 +3502,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -3527,11 +3527,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3540,11 +3540,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3555,11 +3555,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3572,11 +3572,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3585,11 +3585,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3598,11 +3598,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3613,11 +3613,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3630,11 +3630,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3649,11 +3649,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl, uint8_t ta) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3670,11 +3670,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl, uint8_t ta) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -3693,11 +3693,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl, uint8_t ta) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_mt( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -3718,11 +3718,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl, uint8_t ta) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3731,11 +3731,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3746,11 +3746,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3763,11 +3763,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3782,11 +3782,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl, uint8_t ta) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); 
+void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3803,11 +3803,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl, uint8_t ta) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -3826,11 +3826,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl, uint8_t ta) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { + return vlseg7e32(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -3851,11 +3851,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl, uint8_t ta) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3864,11 +3864,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3879,11 +3879,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, 
vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl, uint8_t ta) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3896,11 +3896,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl, uint8_t ta) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3909,11 +3909,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl, uint8_t ta) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -3922,11 +3922,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void 
test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -3937,11 +3937,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl, uint8_t ta) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -3954,11 +3954,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl, uint8_t ta) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 @@ -3973,11 +3973,11 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl, uint8_t ta) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 @@ -3994,11 +3994,11 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl, uint8_t ta) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 @@ -4017,11 +4017,11 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl, uint8_t ta) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, 
vfloat64m1_t maskedoff6, const double *base, size_t vl) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_mt( +// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 @@ -4042,11 +4042,11 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl, uint8_t ta) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -4055,11 +4055,11 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 @@ -4070,11 +4070,11 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // 
-void test_vlseg3e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl, uint8_t ta) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_mt( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 @@ -4087,11 +4087,11 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl, uint8_t ta) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_mt( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 @@ -4100,7 +4100,13199 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl, uint8_t ta) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + 
return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 
+// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, 
const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, 
vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg3e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t 
maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg3e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 
[[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] =
extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, 
vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, 
maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 }
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, 
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, 
vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, 
vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } 
@llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// 
CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg6e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, 
vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// 
CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg8e32ff_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl, size_t ta) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl, size_t ta) {
+  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl, size_t ta) {
+  return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl, size_t ta) {
+  return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl, size_t ta) {
+  return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl, size_t ta) {
+  return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+//
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, 
vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, 
vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t 
maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 
+// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf2_mt(vint16mf2_t 
*v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl, size_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl, size_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl, size_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_mt( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl, size_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl, size_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl, size_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl, size_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl, size_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, const int64_t *base, size_t vl, size_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl, size_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl, size_t ta) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl, size_t ta) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl, size_t ta) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl, size_t ta) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl, size_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl, size_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} 
+ +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl, size_t ta) {
+  return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl, size_t ta) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl, size_t ta) {
+  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t 
mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf2_mt(vuint8mf2_t *v0, 
vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl, size_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl, size_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl, size_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl, size_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl, size_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl, size_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl, size_t ta) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl, size_t ta) {
+  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl, size_t ta) {
+  return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0,
vuint8m4_t maskedoff1, const uint8_t *base, size_t vl, size_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void 
+//
+void test_vlseg2e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl, size_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl, size_t ta) {
+  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl, size_t ta) {
+  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+//
CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl, size_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl, size_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl, size_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl, size_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vlseg3e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl, size_t ta) { 
+ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl, size_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl, size_t ta) { + 
return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl, size_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl, size_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl, size_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl, size_t ta) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl, size_t ta) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl, size_t ta) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl, size_t ta) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl, size_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl, size_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl, 
size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vlseg8e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { 
, , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl, size_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl, size_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl, size_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl, size_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl, size_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl, size_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl, size_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl, size_t ta) { + 
return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl, size_t ta) {
+  return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl, size_t ta) {
+  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl, size_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl, size_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl, size_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl, size_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl, size_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t 
maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl, size_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl, size_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl, size_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl, size_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl, size_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
} @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl, size_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl, size_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl, size_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl, size_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl, size_t ta) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl, size_t ta) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl, size_t ta) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl, size_t ta) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl, size_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl, size_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl, size_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, size_t 
ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t 
maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg6e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vlseg5e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} 
+ +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, 
vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); 
+} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, 
vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg2e32ff_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg6e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return 
vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t 
*base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t 
*new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , 
, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg5e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t 
maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t 
*base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , 
i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* 
[[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32ff_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) {
+ return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) {
+ return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32ff_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) {
+ return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) {
+ return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, size_t ta) {
+ return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) {
+ return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] =
extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 
2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], 
i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t 
*new_vl, size_t vl, size_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 
} [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { 
, , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double 
*base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , 
i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl, size_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl, size_t ta) {
+  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
@@ -8,6 +8,4102 @@
 #include <riscv_vector.h>
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>*
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + 
return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, 
vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_i16m1_m(vint16m1_t 
*v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t 
*v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, 
bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t 
maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t 
maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + 
return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t 
maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, 
vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, 
vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return 
vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// 
+void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m( +// 
CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+//
CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, 
vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, 
vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t 
maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 
+// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t 
maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t 
maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlsseg8e64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -17,7 +4113,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -32,7 +4128,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: 
ret void // -void test_vlsseg3e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -49,7 +4145,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -68,7 +4164,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -89,7 +4185,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -112,7 +4208,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, 
vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -137,7 +4233,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -150,7 +4246,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -165,7 +4261,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -182,7 +4278,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -201,7 +4297,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -222,7 +4318,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -245,7 +4341,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -270,7 +4366,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, 
vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -283,7 +4379,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -298,7 +4394,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -315,7 +4411,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -334,7 +4430,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t 
bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -355,7 +4451,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -378,7 +4474,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -403,7 +4499,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -416,7 +4512,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, 
vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -431,7 +4527,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -448,7 +4544,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -467,7 +4563,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -488,7 +4584,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, 
VE_TAIL_AGNOSTIC); } @@ -511,7 +4607,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -536,7 +4632,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -549,7 +4645,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -564,7 +4660,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -581,7 +4677,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m2_mt (vint8m2_t *v0, 
vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -594,7 +4690,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -607,7 +4703,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -622,7 +4718,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -639,7 +4735,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -658,7 +4754,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t 
maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -679,7 +4775,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -702,7 +4798,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -727,7 +4823,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t 
maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -740,7 +4836,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -755,7 +4851,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -772,7 +4868,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -791,7 +4887,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -812,7 +4908,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -835,7 +4931,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -860,7 +4956,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -873,7 +4969,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { 
return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -888,7 +4984,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -905,7 +5001,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -924,7 +5020,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -945,7 +5041,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -968,7 +5064,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16m1_mt 
(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -993,7 +5089,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1006,7 +5102,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1021,7 +5117,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1038,7 +5134,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t 
*v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1051,7 +5147,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1064,7 +5160,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1079,7 +5175,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1096,7 +5192,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1115,7 +5211,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t 
maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1136,7 +5232,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1159,7 +5255,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1184,7 +5280,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t 
maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1197,7 +5293,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1212,7 +5308,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1229,7 +5325,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1248,7 +5344,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1269,7 +5365,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, 
vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1292,7 +5388,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1317,7 +5413,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1330,7 +5426,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1345,7 +5441,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1362,7 +5458,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1375,7 +5471,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1388,7 +5484,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1403,7 +5499,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1420,7 +5516,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1439,7 +5535,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1460,7 +5556,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1483,7 +5579,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1508,7 +5604,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg8e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1521,7 +5617,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1536,7 +5632,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1553,7 +5649,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1566,7 +5662,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, 
ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1579,7 +5675,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1594,7 +5690,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1611,7 +5707,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1630,7 +5726,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1651,7 +5747,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t 
bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1674,7 +5770,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1699,7 +5795,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1712,7 +5808,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1727,7 +5823,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg3e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1744,7 +5840,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1763,7 +5859,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1784,7 +5880,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1807,7 +5903,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1832,7 +5928,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1845,7 +5941,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1860,7 +5956,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1877,7 +5973,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t 
maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1896,7 +5992,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1917,7 +6013,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1940,7 +6036,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1965,7 +6061,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1978,7 +6074,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -1993,7 +6089,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2010,7 +6106,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2029,7 +6125,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t 
ta) { +void test_vlsseg5e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2050,7 +6146,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2073,7 +6169,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2098,7 +6194,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2111,7 +6207,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2126,7 +6222,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2143,7 +6239,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2156,7 +6252,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e8_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2169,7 +6265,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2184,7 +6280,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2201,7 +6297,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2220,7 +6316,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2241,7 +6337,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2264,7 +6360,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2289,7 +6385,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2302,7 +6398,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2317,7 +6413,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2334,7 +6430,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2353,7 +6449,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2374,7 +6470,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2397,7 +6493,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2422,7 +6518,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2435,7 +6531,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2450,7 +6546,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2467,7 +6563,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2486,7 +6582,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg5e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2507,7 +6603,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2530,7 +6626,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2555,7 +6651,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, 
vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2568,7 +6664,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2583,7 +6679,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2600,7 +6696,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2613,7 +6709,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2626,7 +6722,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_u32mf2_mt(vuint32mf2_t 
*v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2641,7 +6737,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2658,7 +6754,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2677,7 +6773,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2698,7 +6794,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t 
ta) { return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2721,7 +6817,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2746,7 +6842,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2759,7 +6855,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2774,7 +6870,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2791,7 +6887,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2810,7 +6906,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2831,7 +6927,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2854,7 +6950,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2879,7 +6975,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2892,7 +6988,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2907,7 +7003,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2924,7 +7020,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t 
maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2937,7 +7033,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2950,7 +7046,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2965,7 +7061,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -2982,7 +7078,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3001,7 +7097,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, 
vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3022,7 +7118,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3045,7 +7141,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3070,7 +7166,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); 
} @@ -3083,7 +7179,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3098,7 +7194,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3115,7 +7211,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3128,7 +7224,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3141,7 +7237,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3156,7 +7252,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, 
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3173,7 +7269,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3192,7 +7288,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3213,7 +7309,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3236,7 +7332,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3261,7 +7357,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3274,7 +7370,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3289,7 +7385,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3306,7 +7402,7 @@ // 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3325,7 +7421,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3346,7 +7442,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3369,7 +7465,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, 
vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3394,7 +7490,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3407,7 +7503,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3422,7 +7518,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3439,7 +7535,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3458,7 +7554,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3479,7 +7575,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3502,7 +7598,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3527,7 +7623,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, 
vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3540,7 +7636,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3555,7 +7651,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3572,7 +7668,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3585,7 +7681,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e16_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const 
_Float16 *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3598,7 +7694,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3613,7 +7709,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3630,7 +7726,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3649,7 +7745,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3670,7 +7766,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, 
vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3693,7 +7789,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3718,7 +7814,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3731,7 +7827,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, 
vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3746,7 +7842,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3763,7 +7859,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3782,7 +7878,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3803,7 +7899,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3826,7 +7922,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3851,7 +7947,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3864,7 +7960,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3879,7 +7975,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, 
ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3896,7 +7992,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3909,7 +8005,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e32_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3922,7 +8018,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3937,7 +8033,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3954,7 +8050,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, size_t 
ta) { return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3973,7 +8069,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg5e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -3994,7 +8090,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg6e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -4017,7 +8113,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg7e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -4042,7 +8138,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t 
maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg8e64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -4055,7 +8151,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -4070,7 +8166,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg3e64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -4087,7 +8183,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg4e64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } @@ -4100,7 +8196,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { +void test_vlsseg2e64_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, size_t ta) { return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, 
vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -1565,6 +1565,216 @@ return vluxei64(base, bindex, vl); } +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1922,7 +2132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( @@ -1932,7 +2142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, 
vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( @@ -1942,7 +2152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( @@ -1952,7 +2162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( @@ -1962,7 +2172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( @@ -1972,7 +2182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( @@ -1982,7 +2192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( @@ -1992,7 +2202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( @@ -2002,7 +2212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( @@ -2012,7 +2222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( @@ -2022,7 +2232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( @@ -2032,7 +2242,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t 
maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( @@ -2042,7 +2252,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( @@ -2052,7 +2262,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( @@ -2062,7 +2272,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( @@ -2072,7 +2282,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( @@ -2082,7 +2292,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( @@ -2092,7 +2302,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( @@ -2102,7 +2312,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( @@ -2112,7 +2322,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( @@ -2122,7 +2332,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( @@ -2132,7 +2342,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t 
mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( @@ -2142,7 +2352,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( @@ -2152,7 +2362,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( @@ -2162,7 +2372,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( @@ -2172,7 +2382,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( @@ -2182,7 +2392,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( @@ -2192,7 +2402,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( @@ -2202,7 +2412,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( @@ -2212,7 +2422,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( @@ -2222,7 +2432,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( @@ -2232,7 +2442,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t 
test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( @@ -2242,7 +2452,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( @@ -2252,7 +2462,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( @@ -2262,7 +2472,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( @@ -2272,7 +2482,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( @@ -2282,7 +2492,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( @@ -2292,7 +2502,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( @@ -2302,7 +2512,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( @@ -2312,7 +2522,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( @@ -2322,7 +2532,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( @@ -2332,7 
+2542,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( @@ -2342,7 +2552,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( @@ -2352,7 +2562,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( @@ -2362,7 +2572,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( @@ -2372,7 +2582,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( @@ -2382,7 +2592,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( @@ -2392,7 +2602,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( @@ -2402,7 +2612,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( @@ -2412,7 +2622,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( @@ -2422,7 +2632,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei16_v_i32m4_m( @@ -2432,7 +2642,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( @@ -2442,7 +2652,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( @@ -2452,7 +2662,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( @@ -2462,7 +2672,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( @@ -2472,7 +2682,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( @@ -2482,7 +2692,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( @@ -2492,7 +2702,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( @@ -2502,7 +2712,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( @@ -2512,7 +2722,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( @@ -2522,7 +2732,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( @@ -2532,7 +2742,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( @@ -2542,7 +2752,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( @@ -2552,7 +2762,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( @@ -2562,7 +2772,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m( @@ -2572,7 +2782,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m( @@ -2582,7 +2792,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m( @@ -2592,7 +2802,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m( @@ -2602,7 +2812,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m( @@ -2612,7 +2822,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m( @@ -2622,7 +2832,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m( @@ -2632,7 +2842,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m( @@ -2642,7 +2852,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m( @@ -2652,7 +2862,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m( @@ -2662,7 +2872,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m( @@ -2672,7 +2882,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m( @@ -2682,7 +2892,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m( @@ -2692,7 +2902,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m( @@ -2702,7 +2912,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m( @@ -2712,7 +2922,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( @@ -2722,7 +2932,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( @@ -2732,7 +2942,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( @@ -2742,7 +2952,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( @@ -2752,7 +2962,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( @@ -2762,7 +2972,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( @@ -2772,7 +2982,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( @@ -2782,7 +2992,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( @@ -2792,7 +3002,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( @@ -2802,7 +3012,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( @@ -2812,7 +3022,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( @@ -2822,7 +3032,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) 
{ - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( @@ -2832,7 +3042,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( @@ -2842,7 +3052,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( @@ -2852,7 +3062,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( @@ -2862,7 +3072,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( @@ -2872,7 +3082,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( @@ -2882,7 +3092,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( @@ -2892,7 +3102,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( @@ -2902,7 +3112,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( @@ -2912,7 +3122,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( @@ -2922,7 +3132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t 
maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( @@ -2932,7 +3142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( @@ -2942,7 +3152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( @@ -2952,7 +3162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( @@ -2962,7 +3172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( @@ -2972,7 +3182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( @@ -2982,7 +3192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( @@ -2992,7 +3202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( @@ -3002,7 +3212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( @@ -3012,7 +3222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( @@ -3022,7 +3232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( @@ -3032,7 +3242,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( @@ -3042,7 +3252,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( @@ -3052,7 +3262,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( @@ -3062,7 +3272,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( @@ -3072,7 +3282,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( @@ -3082,7 +3292,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( @@ -3092,7 +3302,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( @@ -3102,7 +3312,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( @@ -3112,7 +3322,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( @@ -3122,7 +3332,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( @@ -3132,7 +3342,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( @@ -3142,7 +3352,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( @@ -3152,7 +3362,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( @@ -3162,7 +3372,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( @@ -3172,7 +3382,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( @@ -3182,7 +3392,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( @@ -3192,7 +3402,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( @@ -3202,7 +3412,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( @@ -3212,7 +3422,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); 
+ return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( @@ -3222,7 +3432,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( @@ -3232,7 +3442,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( @@ -3242,7 +3452,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( @@ -3252,7 +3462,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( @@ -3262,7 +3472,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( @@ -3272,7 +3482,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( @@ -3282,7 +3492,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( @@ -3292,7 +3502,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( @@ -3302,7 +3512,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( @@ -3312,7 +3522,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, 
size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( @@ -3322,7 +3532,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( @@ -3332,7 +3542,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( @@ -3342,7 +3552,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( @@ -3352,7 +3562,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( @@ -3362,7 +3572,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( @@ -3372,7 +3582,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( @@ -3382,7 +3592,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( @@ -3392,7 +3602,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( @@ -3402,7 +3612,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( @@ -3412,7 +3622,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, 
vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( @@ -3422,7 +3632,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( @@ -3432,7 +3642,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( @@ -3442,7 +3652,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( @@ -3452,7 +3662,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( @@ -3462,7 +3672,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( @@ -3472,7 +3682,217 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t 
test_vluxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( @@ -3482,7 +3902,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei8_v_f32m1_m( @@ -3492,7 +3912,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( @@ -3502,7 +3922,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( @@ -3512,7 +3932,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( @@ -3522,7 +3942,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( @@ -3532,7 +3952,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( @@ -3542,7 +3962,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( @@ -3552,7 +3972,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( @@ -3562,7 +3982,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( @@ -3572,7 +3992,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( @@ -3582,7 +4002,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return 
vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( @@ -3592,7 +4012,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m( @@ -3602,7 +4022,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m( @@ -3612,7 +4032,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m( @@ -3622,7 +4042,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m( @@ -3632,7 +4052,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m( @@ -3642,7 +4062,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m( @@ -3652,7 +4072,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m( @@ -3662,7 +4082,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m( @@ -3672,7 +4092,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m( @@ -3682,7 +4102,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m( @@ -3692,7 +4112,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m( @@ -3702,7 +4122,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei8(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m( @@ -3712,7 +4132,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m( @@ -3722,7 +4142,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m( @@ -3732,7 +4152,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m( @@ -3742,7 +4162,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei16(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m( @@ -3752,7 +4172,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m( @@ -3762,7 +4182,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m( @@ -3772,7 +4192,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m( @@ -3782,7 +4202,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t 
maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei32(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m( @@ -3792,7 +4212,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m( @@ -3802,7 +4222,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m( @@ -3812,7 +4232,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m( @@ -3822,5 +4242,2126 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei8_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei8_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei8_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei8_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei8_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vluxei8_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vluxei8_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei16_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei16_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t 
test_vluxei16_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei16_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei16_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vluxei16_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei32_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei32_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei32_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei32_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei32_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei64_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei64_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei64_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei64_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei8_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei8_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei8_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei8_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei8_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vluxei8_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei16_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei16_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei16_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei16_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei16_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vluxei16_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei32_v_i16mf4_mt(vbool64_t mask, 
vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei32_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei32_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei32_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei32_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei64_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei64_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, 
base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei64_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei64_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei8_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei8_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei8_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei8_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei8_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei16_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei16_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei16_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei16_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei16_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], 
* [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei32_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei32_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei32_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei32_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei32_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei64_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t 
test_vluxei64_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei64_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei64_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei8_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei8_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei8_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei8_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, 
maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei16_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei16_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei16_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei16_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei32_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei32_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei32_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei32_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei64_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei64_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei64_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei64_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei8_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei8_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei8_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei8_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei8_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vluxei8_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t 
test_vluxei8_v_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei16_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei16_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei16_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei16_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei16_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vluxei16_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, 
maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei32_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei32_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei32_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei32_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei32_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei64_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei64_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei64_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei64_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei8_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei8_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei8_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei8_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei8_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vluxei8_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei16_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei16_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei16_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei16_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei16_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vluxei16_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei32_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei32_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei32_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei32_v_u16m2_mt(vbool8_t mask, vuint16m2_t 
maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei32_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei64_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei64_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei64_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei64_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei8_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, 
base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei8_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei8_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei8_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vluxei8_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei16_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei16_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei16_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei16_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vluxei16_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei32_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei32_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei32_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei32_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vluxei32_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei64_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei64_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei64_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei64_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei8_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei8_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei8_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei8_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei16_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei16_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei16_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t 
*base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei16_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei32_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei32_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei32_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei32_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei64_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); 
+} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei64_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei64_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei64_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei8_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei8_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei8_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei8_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei8_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei8_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei16_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei16_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei16_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei16_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei16_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei16_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei32_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei32_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei32_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei32_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei32_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei64_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei64_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei64_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei64_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t 
test_vluxei8_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei8_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei8_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei8_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vluxei8_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei16_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei16_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { 
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei16_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei16_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vluxei16_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei32_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei32_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei32_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vluxei32_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei32_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vluxei32_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei64_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei64_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei64_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei64_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei8_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei8_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei8_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei8_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl, size_t ta) { + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei16_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei16_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei16_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei16_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl, size_t ta) { + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei32_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei32_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei32_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei32_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl, size_t ta) { + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
ret [[TMP1]] +// +vfloat64m1_t test_vluxei64_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei64_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei64_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei64_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl, size_t ta) { + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -32,7 +32,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -49,7 +49,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -68,7 +68,7 @@ // CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -89,7 +89,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -112,7 +112,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -150,7 +150,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -201,7 +201,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg5ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -222,7 +222,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -298,7 +298,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -315,7 +315,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -334,7 +334,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t 
*v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -355,7 +355,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -378,7 +378,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -431,7 +431,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -448,7 +448,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -467,7 +467,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, 
size_t vl) { +void test_vluxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -511,7 +511,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -536,7 +536,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -549,7 +549,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -564,7 +564,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -581,7 +581,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -594,7 +594,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -607,7 +607,7 @@ // 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -622,7 +622,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -639,7 +639,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -658,7 +658,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -679,7 +679,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -702,7 +702,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -727,7 +727,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -755,7 +755,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -772,7 +772,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -791,7 +791,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -812,7 +812,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -835,7 +835,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -860,7 +860,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -888,7 +888,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -905,7 +905,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -924,7 +924,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -945,7 +945,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -968,7 +968,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1006,7 +1006,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void 
// -void test_vluxseg2ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -1021,7 +1021,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -1038,7 +1038,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -1057,7 +1057,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1078,7 +1078,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1101,7 +1101,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1139,7 +1139,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, 
size_t vl) { +void test_vluxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -1154,7 +1154,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -1184,7 +1184,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -1197,7 +1197,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -1212,7 +1212,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1229,7 +1229,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1248,7 +1248,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1269,7 +1269,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { 
+void test_vluxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1292,7 +1292,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1317,7 +1317,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -1345,7 +1345,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1362,7 +1362,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1381,7 +1381,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1402,7 +1402,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void 
test_vluxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1425,7 +1425,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1463,7 +1463,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -1478,7 +1478,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1514,7 +1514,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1535,7 +1535,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void 
test_vluxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1583,7 +1583,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1596,7 +1596,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -1611,7 +1611,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1628,7 +1628,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1647,7 +1647,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1668,7 +1668,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1691,7 +1691,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1716,7 +1716,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1729,7 +1729,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -1744,7 +1744,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -1761,7 +1761,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -1774,7 +1774,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -1789,7 +1789,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -1806,7 +1806,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg4ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -1825,7 +1825,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1846,7 +1846,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -1869,7 +1869,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -1894,7 +1894,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -1907,7 +1907,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -1922,7 +1922,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -1939,7 +1939,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf4 
(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -1958,7 +1958,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -1979,7 +1979,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2002,7 +2002,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2027,7 +2027,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2040,7 +2040,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -2055,7 +2055,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -2072,7 +2072,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf2 (vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -2091,7 +2091,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2112,7 +2112,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2135,7 +2135,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2160,7 +2160,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2173,7 +2173,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -2188,7 +2188,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -2205,7 +2205,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t 
*v3, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -2224,7 +2224,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2245,7 +2245,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2268,7 +2268,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2293,7 +2293,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2306,7 +2306,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -2321,7 +2321,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2338,7 +2338,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void 
test_vluxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2357,7 +2357,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2378,7 +2378,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2401,7 +2401,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2426,7 +2426,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2439,7 +2439,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -2454,7 +2454,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2471,7 +2471,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const 
int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2490,7 +2490,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2511,7 +2511,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2534,7 +2534,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2559,7 +2559,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2572,7 +2572,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -2587,7 +2587,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2604,7 +2604,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2623,7 +2623,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2644,7 +2644,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2667,7 +2667,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2692,7 +2692,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2705,7 +2705,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -2720,7 +2720,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -2737,7 +2737,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const 
int16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -2750,7 +2750,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -2763,7 +2763,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -2778,7 +2778,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -2795,7 +2795,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -2814,7 +2814,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2835,7 +2835,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2858,7 +2858,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, 
vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -2883,7 +2883,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -2896,7 +2896,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -2911,7 +2911,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -2928,7 +2928,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -2947,7 +2947,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -2968,7 +2968,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -2991,7 +2991,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, 
vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3016,7 +3016,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3029,7 +3029,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -3044,7 +3044,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -3061,7 +3061,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -3080,7 +3080,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3101,7 +3101,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3124,7 +3124,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3149,7 +3149,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3162,7 +3162,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -3177,7 +3177,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -3194,7 +3194,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -3207,7 +3207,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -3220,7 +3220,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -3235,7 +3235,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3252,7 +3252,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3271,7 +3271,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3292,7 +3292,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3315,7 +3315,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3340,7 +3340,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3353,7 +3353,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -3368,7 +3368,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3385,7 +3385,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3404,7 +3404,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3425,7 +3425,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3448,7 +3448,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3473,7 +3473,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3486,7 +3486,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -3501,7 +3501,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3518,7 +3518,7 @@ // 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3537,7 +3537,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3558,7 +3558,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3581,7 +3581,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3606,7 +3606,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3619,7 +3619,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -3634,7 +3634,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -3651,7 +3651,7 @@ // 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -3664,7 +3664,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -3677,7 +3677,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -3692,7 +3692,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -3709,7 +3709,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -3728,7 +3728,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3749,7 +3749,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3772,7 +3772,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, 
size_t vl) { +void test_vluxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3797,7 +3797,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3810,7 +3810,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -3825,7 +3825,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -3842,7 +3842,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -3861,7 +3861,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -3882,7 +3882,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -3905,7 +3905,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, 
vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -3930,7 +3930,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -3943,7 +3943,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -3958,7 +3958,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -3975,7 +3975,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -3994,7 +3994,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4015,7 +4015,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4038,7 +4038,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, 
vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4063,7 +4063,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4076,7 +4076,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -4091,7 +4091,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -4108,7 +4108,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -4121,7 +4121,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -4136,7 +4136,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -4153,7 +4153,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return 
vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -4172,7 +4172,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4193,7 +4193,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4216,7 +4216,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4241,7 +4241,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4254,7 +4254,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -4269,7 +4269,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -4286,7 +4286,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t 
bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -4305,7 +4305,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4326,7 +4326,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4349,7 +4349,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4374,7 +4374,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4387,7 +4387,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -4402,7 +4402,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -4419,7 +4419,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return 
vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -4432,7 +4432,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -4445,7 +4445,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -4460,7 +4460,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -4477,7 +4477,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -4496,7 +4496,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4517,7 +4517,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4540,7 +4540,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4565,7 +4565,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vluxseg8ei16_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4578,7 +4578,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -4593,7 +4593,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -4610,7 +4610,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -4629,7 +4629,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4650,7 +4650,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4673,7 +4673,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4698,7 +4698,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4711,7 +4711,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -4726,7 +4726,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -4743,7 +4743,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -4756,7 +4756,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -4769,7 +4769,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -4784,7 +4784,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -4801,7 +4801,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, 
bindex, vl); } @@ -4820,7 +4820,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4841,7 +4841,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4864,7 +4864,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -4889,7 +4889,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -4902,7 +4902,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -4917,7 +4917,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -4934,7 +4934,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, 
size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -4953,7 +4953,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -4974,7 +4974,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -4997,7 +4997,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5022,7 +5022,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5035,7 +5035,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -5050,7 +5050,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -5067,7 +5067,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) { 
return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -5080,7 +5080,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -5093,7 +5093,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -5108,7 +5108,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -5125,7 +5125,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -5144,7 +5144,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5165,7 +5165,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5188,7 +5188,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5213,7 +5213,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret 
void // -void test_vluxseg8ei64_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5226,7 +5226,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -5241,7 +5241,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -5258,7 +5258,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -5277,7 +5277,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5298,7 +5298,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5321,7 +5321,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5346,7 +5346,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5359,7 +5359,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -5374,7 +5374,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -5391,7 +5391,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -5404,7 +5404,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -5417,7 +5417,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -5432,7 +5432,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -5449,7 +5449,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -5468,7 +5468,7 @@ // 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5489,7 +5489,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5512,7 +5512,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5537,7 +5537,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5550,7 +5550,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -5565,7 +5565,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -5582,7 +5582,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -5595,7 +5595,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -5608,7 +5608,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -5623,7 +5623,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -5640,7 +5640,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -5659,7 +5659,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5680,7 +5680,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5703,7 +5703,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5728,7 +5728,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, 
const int64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5741,7 +5741,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -5756,7 +5756,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -5773,7 +5773,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -5786,7 +5786,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -5799,7 +5799,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -5814,7 +5814,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -5831,7 +5831,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -5850,7 +5850,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, 
vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -5871,7 +5871,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -5894,7 +5894,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -5919,7 +5919,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -5932,7 +5932,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -5947,7 +5947,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -5964,7 +5964,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -5977,7 +5977,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, 
vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -5990,7 +5990,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -6005,7 +6005,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -6022,7 +6022,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -6041,7 +6041,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6062,7 +6062,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6085,7 +6085,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6110,7 +6110,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6123,7 +6123,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -6138,7 +6138,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -6155,7 +6155,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -6168,7 +6168,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -6181,7 +6181,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -6196,7 +6196,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6213,7 +6213,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6232,7 +6232,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6253,7 +6253,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6276,7 +6276,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6301,7 +6301,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6314,7 +6314,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -6329,7 +6329,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6346,7 +6346,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6365,7 +6365,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void 
test_vluxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6386,7 +6386,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6409,7 +6409,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6434,7 +6434,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6447,7 +6447,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -6462,7 +6462,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6479,7 +6479,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6498,7 +6498,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, 
vuint8mf2_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6519,7 +6519,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6542,7 +6542,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6567,7 +6567,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6580,7 +6580,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -6595,7 +6595,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6612,7 +6612,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6631,7 +6631,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, 
vuint8m1_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6652,7 +6652,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6675,7 +6675,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6700,7 +6700,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6713,7 +6713,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -6728,7 +6728,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -6745,7 +6745,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -6758,7 +6758,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t 
*base, vuint8m4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -6771,7 +6771,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -6786,7 +6786,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -6803,7 +6803,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -6822,7 +6822,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6843,7 +6843,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6866,7 +6866,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -6891,7 +6891,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, 
vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -6904,7 +6904,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -6919,7 +6919,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -6936,7 +6936,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -6955,7 +6955,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -6976,7 +6976,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -6999,7 +6999,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7024,7 +7024,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t 
*v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7037,7 +7037,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -7052,7 +7052,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -7069,7 +7069,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -7088,7 +7088,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7109,7 +7109,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7132,7 +7132,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7157,7 +7157,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { 
+void test_vluxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7170,7 +7170,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -7185,7 +7185,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -7202,7 +7202,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -7221,7 +7221,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7242,7 +7242,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7265,7 +7265,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7290,7 +7290,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +void 
test_vluxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7303,7 +7303,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -7318,7 +7318,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -7335,7 +7335,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -7348,7 +7348,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -7361,7 +7361,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -7376,7 +7376,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7393,7 +7393,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7412,7 +7412,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, 
vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7433,7 +7433,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7456,7 +7456,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7481,7 +7481,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7494,7 +7494,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -7509,7 +7509,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7526,7 +7526,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7545,7 +7545,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7566,7 +7566,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7589,7 +7589,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7614,7 +7614,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7627,7 +7627,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -7642,7 +7642,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7659,7 +7659,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7678,7 +7678,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7699,7 +7699,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7722,7 +7722,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7747,7 +7747,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7760,7 +7760,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -7775,7 +7775,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7792,7 +7792,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7811,7 +7811,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -7832,7 +7832,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -7855,7 +7855,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -7880,7 +7880,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -7893,7 +7893,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -7908,7 +7908,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -7925,7 +7925,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -7938,7 +7938,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void 
// -void test_vluxseg2ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -7953,7 +7953,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -7970,7 +7970,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -7989,7 +7989,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8010,7 +8010,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8033,7 +8033,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8058,7 +8058,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8071,7 +8071,7 @@ // CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -8086,7 +8086,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -8103,7 +8103,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -8122,7 +8122,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8143,7 +8143,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8166,7 +8166,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8191,7 +8191,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, 
bindex, vl); } @@ -8204,7 +8204,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -8219,7 +8219,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -8236,7 +8236,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -8255,7 +8255,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8276,7 +8276,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8299,7 +8299,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8324,7 +8324,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { 
return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8337,7 +8337,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -8352,7 +8352,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -8369,7 +8369,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -8388,7 +8388,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8409,7 +8409,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8432,7 +8432,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8457,7 +8457,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg8ei64(v0, 
v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8470,7 +8470,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -8485,7 +8485,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8502,7 +8502,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8521,7 +8521,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8542,7 +8542,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8565,7 +8565,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8590,7 +8590,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t 
*v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8603,7 +8603,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -8618,7 +8618,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8635,7 +8635,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8654,7 +8654,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8675,7 +8675,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8698,7 +8698,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8723,7 +8723,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t 
bindex, size_t vl) { +void test_vluxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8736,7 +8736,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -8751,7 +8751,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8768,7 +8768,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8787,7 +8787,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8808,7 +8808,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -8831,7 +8831,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -8856,7 +8856,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, 
vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -8869,7 +8869,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -8884,7 +8884,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -8901,7 +8901,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -8914,7 +8914,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -8927,7 +8927,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -8942,7 +8942,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -8959,7 +8959,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -8978,7 +8978,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret 
void // -void test_vluxseg5ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -8999,7 +8999,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9022,7 +9022,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9047,7 +9047,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9060,7 +9060,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -9075,7 +9075,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -9092,7 +9092,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, 
size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -9111,7 +9111,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9132,7 +9132,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9155,7 +9155,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9180,7 +9180,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9193,7 +9193,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -9208,7 +9208,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -9225,7 +9225,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { 
+void test_vluxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
 }

@@ -9244,7 +9244,7 @@
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+void test_vluxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
 }

@@ -9265,7 +9265,7 @@
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+void test_vluxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

@@ -9288,7 +9288,7 @@
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+void test_vluxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

@@ -9313,7 +9313,7 @@
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+void test_vluxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

@@ -9326,7 +9326,7 @@
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+void test_vluxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxseg2ei16(v0, v1, base, bindex, vl);
 }

@@ -9341,7 +9341,7 @@
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+void test_vluxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
 }

@@ -9358,7 +9358,7 @@
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2,
vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -9371,7 +9371,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -9384,7 +9384,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -9399,7 +9399,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9416,7 +9416,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9435,7 +9435,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9456,7 +9456,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9479,7 +9479,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9504,7 +9504,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9517,7 +9517,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -9532,7 +9532,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9549,7 +9549,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9568,7 +9568,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9589,7 +9589,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9612,7 +9612,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, 
vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9637,7 +9637,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9650,7 +9650,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -9665,7 +9665,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9682,7 +9682,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9701,7 +9701,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9722,7 +9722,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9745,7 +9745,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9770,7 +9770,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9783,7 +9783,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -9798,7 +9798,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -9815,7 +9815,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -9828,7 +9828,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -9841,7 +9841,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -9856,7 +9856,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u16mf4(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -9873,7 +9873,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -9892,7 +9892,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -9913,7 +9913,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -9936,7 +9936,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -9961,7 +9961,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -9974,7 +9974,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -9989,7 +9989,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf2 
(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -10006,7 +10006,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -10025,7 +10025,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10046,7 +10046,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10069,7 +10069,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10094,7 +10094,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10107,7 +10107,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, 
base, bindex, vl); } @@ -10122,7 +10122,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -10139,7 +10139,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -10158,7 +10158,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10179,7 +10179,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10202,7 +10202,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10227,7 +10227,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10240,7 +10240,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t 
*v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -10255,7 +10255,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -10272,7 +10272,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -10285,7 +10285,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -10300,7 +10300,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -10317,7 +10317,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -10336,7 +10336,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10357,7 +10357,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10380,7 +10380,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10405,7 +10405,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10418,7 +10418,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -10433,7 +10433,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -10450,7 +10450,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -10469,7 +10469,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10490,7 +10490,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, 
v5, base, bindex, vl); } @@ -10513,7 +10513,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10538,7 +10538,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10551,7 +10551,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -10566,7 +10566,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -10583,7 +10583,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -10596,7 +10596,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -10609,7 +10609,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -10624,7 +10624,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32mf2 (vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -10641,7 +10641,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -10660,7 +10660,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10681,7 +10681,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10704,7 +10704,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10729,7 +10729,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10742,7 +10742,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, 
bindex, vl); } @@ -10757,7 +10757,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -10774,7 +10774,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -10793,7 +10793,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -10814,7 +10814,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -10837,7 +10837,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -10862,7 +10862,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -10875,7 +10875,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u32m2(vuint32m2_t *v0, 
vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -10890,7 +10890,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -10907,7 +10907,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -10920,7 +10920,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -10933,7 +10933,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -10948,7 +10948,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -10965,7 +10965,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -10984,7 +10984,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11005,7 +11005,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, 
vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11028,7 +11028,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11053,7 +11053,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11066,7 +11066,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -11081,7 +11081,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -11098,7 +11098,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -11117,7 +11117,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11138,7 +11138,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11161,7 +11161,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11186,7 +11186,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11199,7 +11199,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -11214,7 +11214,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -11231,7 +11231,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -11244,7 +11244,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -11257,7 +11257,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -11272,7 +11272,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -11289,7 +11289,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -11308,7 +11308,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11329,7 +11329,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11352,7 +11352,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11377,7 +11377,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, 
size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11390,7 +11390,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -11405,7 +11405,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -11422,7 +11422,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -11441,7 +11441,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11462,7 +11462,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11485,7 +11485,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11510,7 +11510,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, 
vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11523,7 +11523,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -11538,7 +11538,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -11555,7 +11555,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -11568,7 +11568,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -11581,7 +11581,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -11596,7 +11596,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -11613,7 +11613,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -11632,7 +11632,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void 
test_vluxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11653,7 +11653,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11676,7 +11676,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11701,7 +11701,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11714,7 +11714,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -11729,7 +11729,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -11746,7 +11746,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -11759,7 +11759,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, 
size_t vl) { +void test_vluxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -11772,7 +11772,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -11787,7 +11787,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -11804,7 +11804,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -11823,7 +11823,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -11844,7 +11844,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -11867,7 +11867,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -11892,7 +11892,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +void 
test_vluxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -11905,7 +11905,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -11920,7 +11920,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -11937,7 +11937,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -11950,7 +11950,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -11963,7 +11963,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -11978,7 +11978,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -11995,7 +11995,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -12014,7 +12014,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, 
vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12035,7 +12035,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12058,7 +12058,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12083,7 +12083,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12096,7 +12096,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -12111,7 +12111,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -12128,7 +12128,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -12141,7 +12141,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -12154,7 +12154,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -12169,7 +12169,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -12186,7 +12186,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -12205,7 +12205,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12226,7 +12226,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12249,7 +12249,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12274,7 +12274,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t 
*v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12287,7 +12287,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -12302,7 +12302,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -12319,7 +12319,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -12332,7 +12332,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -12345,7 +12345,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -12360,7 +12360,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12377,7 +12377,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12396,7 +12396,7 @@ // CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12417,7 +12417,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12440,7 +12440,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12465,7 +12465,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12478,7 +12478,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -12493,7 +12493,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12510,7 +12510,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void 
test_vluxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12529,7 +12529,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12550,7 +12550,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12573,7 +12573,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12598,7 +12598,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12611,7 +12611,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -12626,7 +12626,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12643,7 +12643,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 
2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12662,7 +12662,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12683,7 +12683,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12706,7 +12706,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12731,7 +12731,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12744,7 +12744,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -12759,7 +12759,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { 
return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -12776,7 +12776,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -12789,7 +12789,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -12802,7 +12802,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -12817,7 +12817,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -12834,7 +12834,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -12853,7 +12853,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -12874,7 +12874,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -12897,7 +12897,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -12922,7 +12922,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -12935,7 +12935,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -12950,7 +12950,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -12967,7 +12967,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -12986,7 +12986,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13007,7 +13007,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, 
vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13030,7 +13030,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13055,7 +13055,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13068,7 +13068,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -13083,7 +13083,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -13100,7 +13100,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -13119,7 +13119,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13140,7 +13140,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t 
*v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13163,7 +13163,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13188,7 +13188,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13201,7 +13201,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -13216,7 +13216,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -13233,7 +13233,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -13246,7 +13246,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -13259,7 +13259,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -13274,7 +13274,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13291,7 +13291,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13310,7 +13310,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13331,7 +13331,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13354,7 +13354,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13379,7 +13379,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, 
size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13392,7 +13392,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -13407,7 +13407,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13424,7 +13424,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13443,7 +13443,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13464,7 +13464,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13487,7 +13487,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13512,7 +13512,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, 
size_t vl) { +void test_vluxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13525,7 +13525,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -13540,7 +13540,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13557,7 +13557,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13576,7 +13576,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13597,7 +13597,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13620,7 +13620,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13645,7 +13645,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13658,7 +13658,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -13673,7 +13673,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -13690,7 +13690,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -13703,7 +13703,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -13716,7 +13716,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -13731,7 +13731,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -13748,7 +13748,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, 
size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -13767,7 +13767,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13788,7 +13788,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13811,7 +13811,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13836,7 +13836,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13849,7 +13849,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -13864,7 +13864,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -13881,7 +13881,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -13900,7 +13900,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -13921,7 +13921,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -13944,7 +13944,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -13969,7 +13969,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -13982,7 +13982,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -13997,7 +13997,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return 
vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -14014,7 +14014,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -14033,7 +14033,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14054,7 +14054,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14077,7 +14077,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14102,7 +14102,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14115,7 +14115,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -14130,7 +14130,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, 
size_t vl) { +void test_vluxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -14147,7 +14147,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -14160,7 +14160,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -14175,7 +14175,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -14192,7 +14192,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -14211,7 +14211,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14232,7 +14232,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14255,7 +14255,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14280,7 +14280,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14293,7 +14293,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -14308,7 +14308,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -14325,7 +14325,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -14344,7 +14344,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14365,7 +14365,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14388,7 +14388,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, 
const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14413,7 +14413,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14426,7 +14426,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -14441,7 +14441,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -14458,7 +14458,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -14471,7 +14471,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -14484,7 +14484,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -14499,7 +14499,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, 
vl); } @@ -14516,7 +14516,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -14535,7 +14535,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14556,7 +14556,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14579,7 +14579,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14604,7 +14604,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14617,7 +14617,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -14632,7 +14632,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, 
size_t vl) { +void test_vluxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -14649,7 +14649,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -14668,7 +14668,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14689,7 +14689,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14712,7 +14712,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14737,7 +14737,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14750,7 +14750,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -14765,7 +14765,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -14782,7 +14782,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -14795,7 +14795,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -14808,7 +14808,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -14823,7 +14823,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -14840,7 +14840,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -14859,7 +14859,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -14880,7 +14880,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -14903,7 +14903,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -14928,7 +14928,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -14941,7 +14941,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -14956,7 +14956,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -14973,7 +14973,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -14992,7 +14992,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15013,7 +15013,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, 
const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15036,7 +15036,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15061,7 +15061,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15074,7 +15074,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -15089,7 +15089,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -15106,7 +15106,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -15119,7 +15119,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -15132,7 +15132,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { +void 
test_vluxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -15147,7 +15147,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -15164,7 +15164,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -15183,7 +15183,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15204,7 +15204,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15227,7 +15227,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15252,7 +15252,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15265,7 +15265,7 @@ // CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -15280,7 +15280,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -15297,7 +15297,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -15316,7 +15316,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15337,7 +15337,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15360,7 +15360,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15385,7 +15385,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { 
return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15398,7 +15398,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -15413,7 +15413,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -15430,7 +15430,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -15443,7 +15443,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -15456,7 +15456,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -15471,7 +15471,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -15488,7 +15488,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -15507,7 +15507,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const 
double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15528,7 +15528,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15551,7 +15551,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15576,7 +15576,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15589,7 +15589,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -15604,7 +15604,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } @@ -15621,7 +15621,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } @@ -15634,7 +15634,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const 
double *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } @@ -15647,7 +15647,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -15662,7 +15662,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -15679,7 +15679,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -15698,7 +15698,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15719,7 +15719,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15742,7 +15742,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15767,7 +15767,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t 
*v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15780,7 +15780,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -15795,7 +15795,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } @@ -15812,7 +15812,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } @@ -15825,7 +15825,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } @@ -15838,7 +15838,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -15853,7 +15853,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -15870,7 +15870,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -15889,7 +15889,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, 
vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -15910,7 +15910,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -15933,7 +15933,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -15958,7 +15958,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { +void test_vluxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } @@ -15971,7 +15971,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -15986,7 +15986,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } @@ -16003,7 +16003,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } @@ -16016,7 +16016,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 
// CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } @@ -16029,7 +16029,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } @@ -16044,7 +16044,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } @@ -16061,7 +16061,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } @@ -16080,7 +16080,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } @@ -16101,7 +16101,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } @@ -16124,7 +16124,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } @@ -16149,7 +16149,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) {
+void test_vluxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) {
  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
}

@@ -16162,7 +16162,7 @@
// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) {
+void test_vluxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) {
  return vluxseg2ei64(v0, v1, base, bindex, vl);
}

@@ -16177,7 +16177,7 @@
// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) {
+void test_vluxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) {
  return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
}

@@ -16194,7 +16194,7 @@
// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
+void test_vluxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
  return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
}

@@ -16207,10 +16207,16213 @@
// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
+void test_vluxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
  return vluxseg2ei64(v0, v1, base, bindex, vl);
}

+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+//
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t 
*v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t 
maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, 
vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex,
size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, 
vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); 
+} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t 
maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t 
maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t 
maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return 
vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, 
vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t 
*v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , 
} [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + 
return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t 
*v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t 
*v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t 
vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, 
vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t 
maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return 
vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, 
const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
} [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
} [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, 
vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t 
*base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, 
vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t 
bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4 +// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t
*v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, 
vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return 
vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4,
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6 +// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5,
vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return 
vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const 
uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 
4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t 
maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 
+// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t 
*v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, 
size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, 
vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
ret void +// +void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t 
*v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, 
vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, 
vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return 
vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t 
*v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t 
*v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const 
uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, 
vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t 
maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, 
vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2,
vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, 
vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, 
v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t 
maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// 
+void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t 
mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, 
vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64(v0,
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, 
vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, 
vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t 
*v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// 
+void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const 
_Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t 
*v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, 
vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, 
vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, 
vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float 
*base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, 
vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t 
bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float 
*base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, 
vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t 
*v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + 
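For the f32m2/ei16 test just above, the masked segment-load call in fully typed LLVM IR looks roughly like the sketch below. This is only an illustration: the <vscale x 4 x ...> element types are inferred from the nxv4f32/nxv4i16 suffix of the intrinsic name, the value names mirror the FileCheck [[...]] substitutions rather than literal output, and the trailing i64 1 operand is the same policy operand shown in the CHECK lines.

  %0 = call { <vscale x 4 x float>, <vscale x 4 x float> }
         @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64(
           <vscale x 4 x float> %maskedoff0, <vscale x 4 x float> %maskedoff1,
           float* %base, <vscale x 4 x i16> %bindex,
           <vscale x 4 x i1> %mask, i64 %vl, i64 1)

The two <vscale x 4 x float> results are then extracted and stored through v0 and v1, as in the surrounding tests.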
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , 
, } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg2ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 
0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, 
const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, 
vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -16220,7 +32423,7 @@
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg2ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) {
 return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
@@ -16235,7 +32438,7 @@
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg3ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) {
 return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl,
VE_TAIL_AGNOSTIC); } @@ -16252,7 +32455,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16271,7 +32474,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16292,7 +32495,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16315,7 +32518,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return 
vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16340,7 +32543,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16353,7 +32556,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16368,7 +32571,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16385,7 +32588,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16404,7 +32607,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf4_mt 
(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16425,7 +32628,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16448,7 +32651,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16473,7 +32676,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16486,7 +32689,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16501,7 +32704,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16518,7 +32721,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16537,7 +32740,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16558,7 +32761,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t 
*v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16581,7 +32784,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16606,7 +32809,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16619,7 +32822,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); 
} @@ -16634,7 +32837,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16651,7 +32854,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16670,7 +32873,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16691,7 +32894,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16714,7 +32917,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t 
maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16739,7 +32942,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16752,7 +32955,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16767,7 +32970,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16784,7 +32987,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, 
vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16797,7 +33000,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16810,7 +33013,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16825,7 +33028,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16842,7 +33045,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16861,7 +33064,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t 
maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16882,7 +33085,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16905,7 +33108,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16930,7 +33133,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16943,7 +33146,7 @@ // CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16958,7 +33161,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16975,7 +33178,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -16994,7 +33197,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17015,7 +33218,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, 
vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17038,7 +33241,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17063,7 +33266,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17076,7 +33279,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17091,7 +33294,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg3ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17108,7 +33311,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17127,7 +33330,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17148,7 +33351,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17171,7 +33374,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17196,7 +33399,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17209,7 +33412,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17224,7 +33427,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17241,7 +33444,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, 
size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17260,7 +33463,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17281,7 +33484,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17304,7 +33507,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17329,7 +33532,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i8m1_mt(vint8m1_t 
*v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17342,7 +33545,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17357,7 +33560,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17374,7 +33577,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17387,7 +33590,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i8m4_mt(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17400,7 +33603,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17415,7 +33618,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17432,7 +33635,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17451,7 +33654,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17472,7 +33675,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -17495,7 +33698,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17520,7 +33723,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17533,7 +33736,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17548,7 +33751,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-17565,7 +33768,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17584,7 +33787,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17605,7 +33808,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17628,7 +33831,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return 
vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17653,7 +33856,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17666,7 +33869,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17681,7 +33884,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17698,7 +33901,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17717,7 +33920,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17738,7 +33941,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17761,7 +33964,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17786,7 +33989,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, 
vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17799,7 +34002,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17814,7 +34017,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17831,7 +34034,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17850,7 +34053,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17871,7 +34074,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t 
mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17894,7 +34097,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17919,7 +34122,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17932,7 +34135,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17947,7 +34150,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg3ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17964,7 +34167,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i8m2_mt(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17977,7 +34180,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -17992,7 +34195,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18009,7 +34212,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18028,7 +34231,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18049,7 +34252,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18072,7 +34275,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18097,7 +34300,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i8mf8_mt(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, 
vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18110,7 +34313,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18125,7 +34328,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18142,7 +34345,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18161,7 +34364,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18182,7 +34385,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t 
*v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18205,7 +34408,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18230,7 +34433,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i8mf4_mt(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18243,7 +34446,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return 
vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18258,7 +34461,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18275,7 +34478,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18294,7 +34497,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18315,7 +34518,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18338,7 +34541,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18363,7 +34566,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i8mf2_mt(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18376,7 +34579,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18391,7 +34594,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18408,7 +34611,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18427,7 +34630,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18448,7 +34651,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18471,7 +34674,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18496,7 +34699,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i8m1_mt(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18509,7 +34712,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18524,7 +34727,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18541,7 +34744,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18560,7 +34763,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, 
uint8_t ta) { +void test_vluxseg5ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18581,7 +34784,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18604,7 +34807,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18629,7 +34832,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t 
bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18642,7 +34845,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18657,7 +34860,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18674,7 +34877,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18693,7 +34896,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18714,7 +34917,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t 
maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18737,7 +34940,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18762,7 +34965,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18775,7 +34978,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18790,7 +34993,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18807,7 +35010,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18826,7 +35029,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18847,7 +35050,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18870,7 +35073,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18895,7 +35098,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18908,7 +35111,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18923,7 +35126,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18940,7 +35143,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t 
mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18953,7 +35156,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18966,7 +35169,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18981,7 +35184,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -18998,7 +35201,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19017,7 +35220,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19038,7 +35241,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19061,7 +35264,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19086,7 +35289,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19099,7 +35302,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19114,7 +35317,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19131,7 +35334,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19150,7 +35353,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19171,7 +35374,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void 
// -void test_vluxseg6ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19194,7 +35397,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19219,7 +35422,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19232,7 +35435,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i16m1_mt(vint16m1_t 
*v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19247,7 +35450,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19264,7 +35467,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19283,7 +35486,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19304,7 +35507,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19327,7 +35530,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19352,7 +35555,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19365,7 +35568,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19380,7 +35583,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -19397,7 +35600,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19410,7 +35613,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19423,7 +35626,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19438,7 +35641,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19455,7 +35658,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19474,7 
+35677,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19495,7 +35698,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19518,7 +35721,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19543,7 +35746,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, 
uint8_t ta) { +void test_vluxseg8ei32_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19556,7 +35759,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19571,7 +35774,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19588,7 +35791,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19607,7 +35810,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return 
vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19628,7 +35831,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19651,7 +35854,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19676,7 +35879,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19689,7 +35892,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg2ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19704,7 +35907,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19721,7 +35924,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19740,7 +35943,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19761,7 +35964,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t 
maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19784,7 +35987,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19809,7 +36012,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19822,7 +36025,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19837,7 +36040,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, 
vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19854,7 +36057,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19867,7 +36070,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i16m4_mt(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19880,7 +36083,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19895,7 +36098,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19912,7 +36115,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t 
maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19931,7 +36134,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19952,7 +36155,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -19975,7 +36178,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20000,7 +36203,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i16mf4_mt(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20013,7 +36216,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20028,7 +36231,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20045,7 +36248,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20064,7 +36267,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20085,7 +36288,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20108,7 +36311,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20133,7 +36336,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i16mf2_mt(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20146,7 +36349,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20161,7 +36364,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20178,7 +36381,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20197,7 +36400,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20218,7 +36421,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg6ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20241,7 +36444,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20266,7 +36469,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i16m1_mt(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20279,7 +36482,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20294,7 +36497,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, 
vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20311,7 +36514,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i16m2_mt(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20324,7 +36527,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20339,7 +36542,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20356,7 +36559,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20375,7 +36578,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32mf2_mt (vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20396,7 +36599,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20419,7 +36622,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20444,7 +36647,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, 
vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20457,7 +36660,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20472,7 +36675,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20489,7 +36692,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20508,7 +36711,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20529,7 +36732,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg6ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20552,7 +36755,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20577,7 +36780,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20590,7 +36793,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20605,7 +36808,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20622,7 +36825,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20635,7 +36838,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20648,7 +36851,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20663,7 +36866,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20680,7 
+36883,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20699,7 +36902,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20720,7 +36923,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20743,7 +36946,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, 
vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20768,7 +36971,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20781,7 +36984,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20796,7 +36999,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20813,7 +37016,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -20832,7 +37035,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20853,7 +37056,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20876,7 +37079,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20901,7 +37104,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i32m1_mt(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20914,7 +37117,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20929,7 +37132,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20946,7 +37149,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20959,7 +37162,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20972,7 +37175,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t 
vl, uint8_t ta) { +void test_vluxseg2ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -20987,7 +37190,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21004,7 +37207,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21023,7 +37226,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21044,7 +37247,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t 
bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21067,7 +37270,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21092,7 +37295,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21105,7 +37308,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21120,7 +37323,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, 
vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21137,7 +37340,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21156,7 +37359,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21177,7 +37380,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21200,7 +37403,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, 
vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21225,7 +37428,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21238,7 +37441,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21253,7 +37456,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21270,7 +37473,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return 
vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21283,7 +37486,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21296,7 +37499,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21311,7 +37514,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21328,7 +37531,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21347,7 +37550,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const 
int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21368,7 +37571,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21391,7 +37594,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21416,7 +37619,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i32mf2_mt(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21429,7 +37632,7 @@ // CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21444,7 +37647,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21461,7 +37664,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21480,7 +37683,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21501,7 +37704,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t 
maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21524,7 +37727,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21549,7 +37752,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i32m1_mt(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21562,7 +37765,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21577,7 +37780,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg3ei64_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21594,7 +37797,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i32m2_mt(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21607,7 +37810,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i32m4_mt(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21620,7 +37823,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21635,7 +37838,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21652,7 +37855,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21671,7 +37874,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21692,7 +37895,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21715,7 +37918,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21740,7 +37943,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t 
maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21753,7 +37956,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21768,7 +37971,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21785,7 +37988,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21798,7 +38001,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21811,7 +38014,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21826,7 +38029,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21843,7 +38046,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21862,7 +38065,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21883,7 +38086,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21906,7 +38109,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21931,7 +38134,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21944,7 +38147,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21959,7 +38162,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg3ei16_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21976,7 +38179,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -21989,7 +38192,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22002,7 +38205,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22017,7 +38220,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22034,7 +38237,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t 
maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22053,7 +38256,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22074,7 +38277,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22097,7 +38300,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22122,7 +38325,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22135,7 +38338,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22150,7 +38353,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22167,7 +38370,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22180,7 +38383,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22193,7 +38396,7 @@ // 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22208,7 +38411,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22225,7 +38428,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22244,7 +38447,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22265,7 +38468,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, 
vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22288,7 +38491,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22313,7 +38516,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_i64m1_mt(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22326,7 +38529,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22341,7 +38544,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { 
+void test_vluxseg3ei64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22358,7 +38561,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_i64m2_mt(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22371,7 +38574,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_i64m4_mt(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22384,7 +38587,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22399,7 +38602,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22416,7 +38619,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t 
mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22435,7 +38638,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22456,7 +38659,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22479,7 +38682,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22504,7 +38707,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t 
*v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22517,7 +38720,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22532,7 +38735,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22549,7 +38752,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22568,7 +38771,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22589,7 +38792,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22612,7 +38815,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22637,7 +38840,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22650,7 +38853,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22665,7 +38868,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22682,7 +38885,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22701,7 +38904,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22722,7 +38925,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t 
maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22745,7 +38948,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22770,7 +38973,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22783,7 +38986,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22798,7 +39001,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22815,7 +39018,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22834,7 +39037,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22855,7 +39058,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22878,7 +39081,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, 
vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22903,7 +39106,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22916,7 +39119,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22931,7 +39134,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22948,7 +39151,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t 
ta) { +void test_vluxseg4ei8_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22961,7 +39164,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22974,7 +39177,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -22989,7 +39192,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23006,7 +39209,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23025,7 +39228,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t 
ta) { +void test_vluxseg5ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23046,7 +39249,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23069,7 +39272,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23094,7 +39297,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t 
bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23107,7 +39310,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23122,7 +39325,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23139,7 +39342,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23158,7 +39361,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23179,7 +39382,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23202,7 +39405,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23227,7 +39430,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23240,7 +39443,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23255,7 +39458,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23272,7 +39475,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23291,7 +39494,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23312,7 +39515,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23335,7 +39538,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23360,7 +39563,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23373,7 +39576,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23388,7 +39591,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23405,7 +39608,7 @@ // 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23424,7 +39627,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23445,7 +39648,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23468,7 +39671,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23493,7 +39696,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23506,7 +39709,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23521,7 +39724,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23538,7 +39741,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23551,7 +39754,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, 
vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u8m4_mt(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23564,7 +39767,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23579,7 +39782,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23596,7 +39799,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23615,7 +39818,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23636,7 +39839,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg6ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23659,7 +39862,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23684,7 +39887,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23697,7 +39900,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u8mf4_mt(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23712,7 +39915,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23729,7 +39932,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23748,7 +39951,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23769,7 +39972,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23792,7 +39995,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23817,7 +40020,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23830,7 +40033,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23845,7 +40048,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const 
uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23862,7 +40065,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23881,7 +40084,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23902,7 +40105,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23925,7 +40128,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, 
vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23950,7 +40153,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23963,7 +40166,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23978,7 +40181,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -23995,7 +40198,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, 
size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24014,7 +40217,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24035,7 +40238,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24058,7 +40261,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24083,7 +40286,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t 
*base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24096,7 +40299,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24111,7 +40314,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24128,7 +40331,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u8m2_mt(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24141,7 +40344,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24156,7 +40359,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24173,7 +40376,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24192,7 +40395,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24213,7 +40416,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24236,7 +40439,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24261,7 +40464,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u8mf8_mt(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24274,7 +40477,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24289,7 +40492,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24306,7 +40509,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t 
maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24325,7 +40528,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24346,7 +40549,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24369,7 +40572,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24394,7 
+40597,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u8mf4_mt(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24407,7 +40610,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24422,7 +40625,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24439,7 +40642,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24458,7 +40661,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t 
mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24479,7 +40682,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24502,7 +40705,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24527,7 +40730,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u8mf2_mt(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24540,7 +40743,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24555,7 +40758,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24572,7 +40775,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24591,7 +40794,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24612,7 +40815,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24635,7 +40838,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24660,7 +40863,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u8m1_mt(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24673,7 +40876,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24688,7 +40891,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24705,7 +40908,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24724,7 +40927,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24745,7 +40948,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -24768,7 +40971,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24793,7 +40996,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24806,7 +41009,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24821,7 +41024,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t 
*base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24838,7 +41041,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24857,7 +41060,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24878,7 +41081,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24901,7 +41104,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t 
*v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24926,7 +41129,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24939,7 +41142,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24954,7 +41157,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24971,7 +41174,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -24990,7 +41193,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25011,7 +41214,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25034,7 +41237,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25059,7 +41262,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t 
*v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25072,7 +41275,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25087,7 +41290,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25104,7 +41307,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25117,7 +41320,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, 
size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25130,7 +41333,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25145,7 +41348,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25162,7 +41365,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25181,7 +41384,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25202,7 +41405,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t 
maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25225,7 +41428,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25250,7 +41453,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25263,7 +41466,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t 
bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25278,7 +41481,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25295,7 +41498,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25314,7 +41517,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25335,7 +41538,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25358,7 +41561,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25383,7 +41586,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25396,7 +41599,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25411,7 +41614,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25428,7 +41631,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25447,7 +41650,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25468,7 +41671,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25491,7 +41694,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, 
vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25516,7 +41719,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25529,7 +41732,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25544,7 +41747,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25561,7 +41764,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, 
vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25574,7 +41777,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25587,7 +41790,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25602,7 +41805,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25619,7 +41822,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25638,7 +41841,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u16mf4_mt(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25659,7 +41862,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25682,7 +41885,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25707,7 +41910,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t 
maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25720,7 +41923,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25735,7 +41938,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25752,7 +41955,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25771,7 +41974,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25792,7 +41995,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25815,7 +42018,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25840,7 +42043,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25853,7 +42056,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg2ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25868,7 +42071,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25885,7 +42088,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25904,7 +42107,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25925,7 +42128,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { 
return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25948,7 +42151,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25973,7 +42176,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -25986,7 +42189,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26001,7 +42204,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, 
vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26018,7 +42221,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26031,7 +42234,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u16m4_mt(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26044,7 +42247,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26059,7 +42262,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26076,7 +42279,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26095,7 +42298,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26116,7 +42319,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26139,7 +42342,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26164,7 +42367,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u16mf4_mt(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26177,7 +42380,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26192,7 +42395,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26209,7 +42412,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26228,7 +42431,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, 
vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26249,7 +42452,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26272,7 +42475,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26297,7 +42500,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u16mf2_mt(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, 
vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26310,7 +42513,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26325,7 +42528,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26342,7 +42545,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26361,7 +42564,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26382,7 +42585,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg6ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26405,7 +42608,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26430,7 +42633,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u16m1_mt(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26443,7 +42646,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u16m2_mt(vuint16m2_t *v0, 
vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26458,7 +42661,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26475,7 +42678,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u16m2_mt(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26488,7 +42691,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26503,7 +42706,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26520,7 +42723,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u32mf2_mt(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26539,7 +42742,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26560,7 +42763,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26583,7 +42786,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26608,7 +42811,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void 
// -void test_vluxseg8ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26621,7 +42824,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26636,7 +42839,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26653,7 +42856,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26672,7 +42875,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26693,7 +42896,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26716,7 +42919,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26741,7 +42944,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t 
maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26754,7 +42957,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26769,7 +42972,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26786,7 +42989,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26799,7 +43002,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26812,7 +43015,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, 
vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26827,7 +43030,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26844,7 +43047,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26863,7 +43066,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26884,7 +43087,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26907,7 +43110,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26932,7 +43135,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26945,7 +43148,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26960,7 +43163,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t 
maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26977,7 +43180,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -26996,7 +43199,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27017,7 +43220,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27040,7 +43243,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27065,7 +43268,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27078,7 +43281,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27093,7 +43296,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27110,7 +43313,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, 
vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27123,7 +43326,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27136,7 +43339,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27151,7 +43354,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27168,7 +43371,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27187,7 +43390,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg5ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27208,7 +43411,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27231,7 +43434,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27256,7 +43459,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t 
maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27269,7 +43472,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27284,7 +43487,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27301,7 +43504,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27320,7 +43523,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27341,7 +43544,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27364,7 +43567,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27389,7 +43592,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27402,7 +43605,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, 
const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27417,7 +43620,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27434,7 +43637,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27447,7 +43650,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27460,7 +43663,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27475,7 +43678,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27492,7 +43695,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27511,7 +43714,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27532,7 +43735,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27555,7 +43758,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t 
*v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27580,7 +43783,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u32mf2_mt(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27593,7 +43796,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27608,7 +43811,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27625,7 +43828,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t 
maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27644,7 +43847,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27665,7 +43868,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27688,7 +43891,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27713,7 +43916,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, 
vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u32m1_mt(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27726,7 +43929,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27741,7 +43944,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27758,7 +43961,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u32m2_mt(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27771,7 +43974,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u32m4_mt(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, size_t ta) { 
return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27784,7 +43987,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27799,7 +44002,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27816,7 +44019,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27835,7 +44038,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27856,7 +44059,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, 
vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27879,7 +44082,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27904,7 +44107,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27917,7 +44120,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27932,7 +44135,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27949,7 +44152,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27962,7 +44165,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27975,7 +44178,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -27990,7 +44193,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28007,7 +44210,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t 
*v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28026,7 +44229,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28047,7 +44250,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28070,7 +44273,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28095,7 +44298,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28108,7 +44311,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28123,7 +44326,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28140,7 +44343,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28153,7 +44356,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei16_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28166,7 +44369,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28181,7 +44384,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28198,7 +44401,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28217,7 +44420,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28238,7 +44441,7 @@ 
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28261,7 +44464,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28286,7 +44489,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28299,7 +44502,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, 
vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28314,7 +44517,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28331,7 +44534,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28344,7 +44547,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28357,7 +44560,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28372,7 +44575,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t 
maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28389,7 +44592,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28408,7 +44611,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28429,7 +44632,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28452,7 +44655,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t 
*v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28477,7 +44680,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_u64m1_mt(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28490,7 +44693,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28505,7 +44708,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28522,7 +44725,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_u64m2_mt(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t 
maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28535,7 +44738,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_u64m4_mt(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28548,7 +44751,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28563,7 +44766,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28580,7 +44783,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28599,7 +44802,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg5ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28620,7 +44823,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28643,7 +44846,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28668,7 +44871,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t 
maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28681,7 +44884,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28696,7 +44899,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28713,7 +44916,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28732,7 +44935,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28753,7 
+44956,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28776,7 +44979,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28801,7 +45004,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28814,7 +45017,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28829,7 +45032,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28846,7 +45049,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28865,7 +45068,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28886,7 +45089,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28909,7 +45112,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28934,7 +45137,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28947,7 +45150,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -28962,7 +45165,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
-void test_vluxseg3ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg3ei8_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) {
return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -28979,7 +45182,7 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg4ei8_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, size_t ta) {
return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -28992,7 +45195,7 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei8_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg2ei8_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, size_t ta) {
return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -29005,7 +45208,7 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg2ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -29020,7 +45223,7 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg3ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -29037,7 +45240,7 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg4ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -29056,7 +45259,7 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg5ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -29077,7 +45280,7 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg6ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg6ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) {
return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
@@ -29100,7 +45303,7 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+void test_vluxseg7ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4,
vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29125,7 +45328,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29138,7 +45341,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29153,7 +45356,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29170,7 +45373,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, 
vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29189,7 +45392,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29210,7 +45413,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29233,7 +45436,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29258,7 +45461,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf2_mt (vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29271,7 +45474,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29286,7 +45489,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29303,7 +45506,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29322,7 +45525,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, 
vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29343,7 +45546,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29366,7 +45569,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29391,7 +45594,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t 
*v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29404,7 +45607,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29419,7 +45622,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29436,7 +45639,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29449,7 +45652,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29462,7 +45665,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, 
size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29477,7 +45680,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29494,7 +45697,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29513,7 +45716,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29534,7 +45737,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29557,7 +45760,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29582,7 +45785,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29595,7 +45798,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29610,7 +45813,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg3ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29627,7 +45830,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29646,7 +45849,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29667,7 +45870,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29690,7 +45893,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29715,7 +45918,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29728,7 +45931,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29743,7 +45946,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, 
size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29760,7 +45963,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29779,7 +45982,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29800,7 +46003,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29823,7 +46026,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t 
*v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29848,7 +46051,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29861,7 +46064,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29876,7 +46079,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29893,7 +46096,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, 
vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29906,7 +46109,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f16m4_mt(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29919,7 +46122,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29934,7 +46137,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29951,7 +46154,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29970,7 +46173,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, 
vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -29991,7 +46194,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30014,7 +46217,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30039,7 +46242,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_f16mf4_mt(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, 
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30052,7 +46255,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30067,7 +46270,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30084,7 +46287,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30103,7 +46306,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30124,7 +46327,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30147,7 +46350,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30172,7 +46375,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_f16mf2_mt(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-30185,7 +46388,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30200,7 +46403,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30217,7 +46420,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30236,7 +46439,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30257,7 +46460,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, 
size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30280,7 +46483,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30305,7 +46508,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_f16m1_mt(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30318,7 +46521,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30333,7 
+46536,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30350,7 +46553,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f16m2_mt(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30363,7 +46566,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30378,7 +46581,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30395,7 +46598,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, 
size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30414,7 +46617,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30435,7 +46638,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30458,7 +46661,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30483,7 +46686,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, 
vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30496,7 +46699,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30511,7 +46714,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30528,7 +46731,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30547,7 +46750,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t 
vl, uint8_t ta) { +void test_vluxseg5ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30568,7 +46771,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30591,7 +46794,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30616,7 +46819,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, 
vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30629,7 +46832,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30644,7 +46847,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30661,7 +46864,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30674,7 +46877,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30687,7 +46890,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, 
size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30702,7 +46905,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30719,7 +46922,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30738,7 +46941,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30759,7 +46962,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30782,7 +46985,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30807,7 +47010,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30820,7 +47023,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30835,7 +47038,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg3ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30852,7 +47055,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30871,7 +47074,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30892,7 +47095,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30915,7 +47118,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t 
maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30940,7 +47143,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30953,7 +47156,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30968,7 +47171,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30985,7 +47188,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t 
maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -30998,7 +47201,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31011,7 +47214,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31026,7 +47229,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31043,7 +47246,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31062,7 +47265,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31083,7 +47286,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31106,7 +47309,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31131,7 +47334,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void 
test_vluxseg8ei32_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31144,7 +47347,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31159,7 +47362,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31176,7 +47379,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31195,7 +47398,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t 
bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31216,7 +47419,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31239,7 +47442,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31264,7 +47467,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-31277,7 +47480,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31292,7 +47495,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31309,7 +47512,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31322,7 +47525,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31335,7 +47538,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31350,7 +47553,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32mf2_mt (vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31367,7 +47570,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31386,7 +47589,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31407,7 +47610,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31430,7 +47633,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2_mt (vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31455,7 +47658,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_f32mf2_mt(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31468,7 +47671,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31483,7 +47686,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -31500,7 +47703,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31519,7 +47722,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31540,7 +47743,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31563,7 +47766,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t 
maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31588,7 +47791,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_f32m1_mt(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31601,7 +47804,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31616,7 +47819,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31633,7 +47836,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f32m2_mt(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, 
vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31646,7 +47849,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f32m4_mt(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31659,7 +47862,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31674,7 +47877,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31691,7 +47894,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31710,7 +47913,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t 
maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31731,7 +47934,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31754,7 +47957,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31779,7 +47982,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei8_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, size_t ta) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31792,7 +47995,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31807,7 +48010,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei8_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31824,7 +48027,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei8_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31837,7 +48040,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei8_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31850,7 +48053,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31865,7 +48068,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31882,7 +48085,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31901,7 +48104,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31922,7 +48125,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31945,7 +48148,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31970,7 +48173,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei16_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, size_t ta) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31983,7 +48186,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -31998,7 +48201,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei16_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } @@ -32015,7 +48218,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei16_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32028,7 +48231,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei16_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32041,7 +48244,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32056,7 +48259,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32073,7 +48276,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32092,7 +48295,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32113,7 +48316,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32136,7 +48339,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32161,7 +48364,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei32_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, size_t ta) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32174,7 +48377,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32189,7 +48392,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei32_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32206,7 +48409,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei32_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32219,7 +48422,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei32_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32232,7 +48435,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32247,7 +48450,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32264,7 +48467,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32283,7 +48486,7 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg5ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32304,7 +48507,7 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, 
vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg6ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32327,7 +48530,7 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg7ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32352,7 +48555,7 @@ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg8ei64_v_f64m1_mt(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, size_t ta) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32365,7 +48568,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ 
-32380,7 +48583,7 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg3ei64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32397,7 +48600,7 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg4ei64_v_f64m2_mt(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, size_t ta) { return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } @@ -32410,7 +48613,7 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { +void test_vluxseg2ei64_v_f64m4_mt(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, size_t ta) { return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c @@ -129,3 +129,4 @@ vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { return vmandnot(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, 
vint8m8_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( @@ -1117,7 
+1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmax(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); 
+ return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, 
size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return 
vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64(
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t 
ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, 
vuint8m2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16m4_t test_vmaxu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: 
@test_vmfeq_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmfeq_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfeq(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfeq(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfeq(op1, op2, vl); } @@ -103,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfeq(op1, op2, vl); } @@ -122,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfeq(op1, op2, vl); } @@ -141,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfeq(op1, op2, vl); } @@ -173,14 +275,120 @@ return vmfeq(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmfeq_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfeq(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -189,8 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -199,9 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -210,8 +415,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -220,9 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -231,8 +433,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -241,9 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -252,8 +451,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -262,9 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -273,8 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -283,9 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -294,8 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -304,9 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -315,8 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -325,9 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -336,8 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -346,9 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } @@ -357,7 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c @@ -1,16 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfge(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -19,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -28,7 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -37,7 +145,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f32m1_b32 (vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -46,7 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -55,7 +163,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f32m2_b16 (vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, 
float op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -64,7 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -73,7 +181,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f32m4_b8 (vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -82,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -91,7 +199,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f32m8_b4 (vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -100,7 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -109,7 +217,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f64m1_b64 (vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -118,7 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -127,7 +235,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f64m2_b32 (vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -136,7 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -145,7 +253,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool16_t test_vmfge_vf_f64m4_b16 (vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -154,7 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfge(op1, op2, vl); } @@ -163,16 +271,124 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { return vmfge(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, 
vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -181,7 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool64_t test_vmfge_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -190,7 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -199,7 +415,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -208,7 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -217,7 +433,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -226,7 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -235,7 +451,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -244,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f32m8_b4_m 
(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -253,7 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -262,7 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -271,7 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -280,7 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -289,7 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -298,7 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -307,7 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f64m4_b16_m (vbool16_t mask, vbool16_t 
maskedoff, vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -316,7 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } @@ -325,7 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vmfge(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c @@ -1,16 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -19,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -28,7 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmfgt.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -37,7 +145,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f32m1_b32 (vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -46,7 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -55,7 +163,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f32m2_b16 (vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -64,7 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -73,7 +181,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f32m4_b8 (vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -82,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -91,7 +199,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f32m8_b4 (vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -100,7 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -109,7 +217,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f64m1_b64 (vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t 
test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -118,7 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -127,7 +235,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f64m2_b32 (vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -136,7 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -145,7 +253,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f64m4_b16 (vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -154,7 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfgt(op1, op2, vl); } @@ -163,16 +271,124 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { return vmfgt(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t 
mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t 
test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -181,7 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -190,7 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -199,7 +415,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -208,7 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -217,7 +433,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t 
test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -226,7 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -235,7 +451,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -244,7 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -253,7 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -262,7 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -271,7 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -280,7 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t 
maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -289,7 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -298,7 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -307,7 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -316,7 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } @@ -325,7 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vmfgt(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfle(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfle(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfle(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfle(op1, op2, vl); } @@ -103,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfle(op1, op2, vl); } @@ -122,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfle(op1, op2, vl); } @@ -141,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfle(op1, op2, vl); } @@ -173,14 +275,120 @@ return vmfle(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -189,8 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -199,9 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -210,8 +415,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -220,9 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - 
vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -231,8 +433,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -241,9 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -252,8 +451,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -262,9 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -273,8 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -283,9 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -294,8 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t 
vl) { +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -304,9 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -315,8 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -325,9 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -336,8 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -346,9 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } @@ -357,7 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py 
// REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) 
{ + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmflt(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmflt(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmflt(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmflt(op1, op2, vl); } @@ -103,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmflt(op1, op2, vl); } @@ -122,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmflt(op1, op2, vl); } @@ -141,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmflt(op1, op2, vl); } @@ -173,14 +275,120 @@ return vmflt(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmflt_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -189,8 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -199,9 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -210,8 +415,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -220,9 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -231,8 +433,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -241,9 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -252,8 +451,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -262,9 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -273,8 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -283,9 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -294,8 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -304,9 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -315,8 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -325,9 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -336,8 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -346,9 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } @@ -357,7 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c @@ -1,17 +1,124 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmfne_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfne(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfne(op1, op2, vl); } @@ -29,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfne(op1, op2, vl); } @@ -48,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfne(op1, op2, vl); } @@ -103,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfne(op1, op2, vl); } @@ -122,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, - 
size_t vl) { +vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfne(op1, op2, vl); } @@ -141,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfne(op1, op2, vl); } @@ -173,14 +275,120 @@ return vmfne(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t 
test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -189,8 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -199,9 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -210,8 +415,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -220,9 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -231,8 +433,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -241,9 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -252,8 +451,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -262,9 +460,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -273,8 +469,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -283,9 +478,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -294,8 +487,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -304,9 +496,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -315,8 +505,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -325,9 +514,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -336,8 +523,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -346,9 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } @@ -357,7 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m( @@ -1063,7 +1063,7 
@@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmin_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmin(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t 
op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vx_i8mf4_mt(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, 
vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vx_i32m4_mt(vbool8_t mask, vint32m4_t 
maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t 
test_vminu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, 
size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint64m4_t test_vminu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c @@ -9,7 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv64i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmmv_m_b1 (vbool1_t op1, size_t vl) { +vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) { return vmmv(op1, vl); } @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv32i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmmv_m_b2 (vbool2_t op1, size_t vl) { +vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) { return vmmv(op1, vl); } @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv16i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmmv_m_b4 (vbool4_t op1, size_t vl) { +vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) { return vmmv(op1, vl); } @@ -36,7 +36,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv8i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmmv_m_b8 (vbool8_t op1, size_t vl) { +vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) { return vmmv(op1, vl); } @@ -45,7 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv4i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmmv_m_b16 (vbool16_t op1, size_t vl) { +vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) { return vmmv(op1, vl); } @@ -54,7 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv2i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmmv_m_b32 (vbool32_t op1, size_t vl) { +vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) { return vmmv(op1, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv1i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmmv_m_b64 (vbool64_t op1, size_t vl) { +vbool64_t test_vmmv_m_b64(vbool64_t op1, size_t vl) { return vmmv(op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c @@ -66,3 +66,4 @@ vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { return vmnand(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c @@ -66,3 +66,4 @@ vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { return vmnor(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c @@ -9,7 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv64i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmnot_m_b1 (vbool1_t op1, size_t vl) { +vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) { return vmnot(op1, vl); } @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv32i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmnot_m_b2 (vbool2_t op1, size_t vl) { +vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) { return vmnot(op1, vl); } @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv16i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmnot_m_b4 (vbool4_t op1, size_t vl) { +vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) { return vmnot(op1, vl); } @@ -36,7 +36,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv8i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmnot_m_b8 (vbool8_t op1, size_t vl) { +vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) { return vmnot(op1, vl); } @@ -45,7 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv4i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmnot_m_b16 (vbool16_t op1, size_t vl) { +vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) { return vmnot(op1, vl); } @@ -54,7 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv2i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmnot_m_b32 (vbool32_t op1, size_t vl) { +vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) { return vmnot(op1, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv1i1.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmnot_m_b64 (vbool64_t op1, size_t vl) { +vbool64_t test_vmnot_m_b64(vbool64_t op1, size_t vl) { return vmnot(op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c @@ -129,3 +129,4 @@ vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { return vmornot(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c @@ -9,8 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -19,8 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -47,8 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -57,8 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -85,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -95,8 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -123,8 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, - size_t vl) { +vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -133,8 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, - 
size_t vl) { +vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -161,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, - size_t vl) { +vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -171,8 +162,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, - size_t vl) { +vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -199,8 +189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, - size_t vl) { +vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -209,8 +198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, - size_t vl) { +vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -237,8 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, - size_t vl) { +vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -247,8 +234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, - size_t vl) { +vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -275,8 +261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -285,8 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t 
op1, int16_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -295,8 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsbc(op1, op2, vl); } @@ -314,8 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -324,8 +306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -334,8 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsbc(op1, op2, vl); } @@ -353,8 +333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -363,8 +342,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -391,8 +369,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -401,8 +378,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, - size_t vl) { +vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -429,8 +405,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -439,8 +414,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, - size_t vl) { +vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -467,8 +441,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, - vbool2_t borrowin, size_t vl) { +vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -477,8 +450,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, - size_t vl) { +vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -505,8 +477,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -515,8 +486,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -525,8 +495,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsbc(op1, op2, vl); } @@ -544,8 +513,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -554,8 +522,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -582,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -592,8 +558,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -620,8 +585,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -630,8 +594,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, - size_t vl) { +vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -658,8 +621,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -668,8 +630,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, - size_t vl) { +vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -696,8 +657,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -706,8 +666,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -734,8 +693,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -744,8 +702,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -772,8 +729,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -782,8 +738,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -810,8 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -820,8 +774,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, - size_t vl) { +vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -848,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -858,8 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -886,8 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -896,8 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -924,8 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -934,8 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -962,8 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -972,8 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, - size_t vl) { +vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1000,8 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1010,8 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, - size_t vl) { +vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1038,8 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, - vbool2_t borrowin, size_t vl) { +vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1048,8 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, - size_t vl) { +vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1076,8 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, - vbool1_t borrowin, size_t vl) { +vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1086,8 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, - size_t vl) { +vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1114,8 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1124,8 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1134,8 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsbc(op1, op2, vl); } @@ -1153,8 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1163,8 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1173,8 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsbc(op1, op2, vl); } @@ -1192,8 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1202,8 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1230,8 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1240,8 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1268,8 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1278,8 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1306,8 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, - vbool2_t borrowin, size_t vl) { +vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1316,8 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, - vbool2_t borrowin, size_t vl) { +vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1344,8 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1354,8 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1364,8 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsbc(op1, op2, vl); } @@ -1383,8 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1393,8 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1421,8 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], 
[[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1431,8 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1459,8 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1469,8 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1497,8 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1507,8 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, - vbool4_t borrowin, size_t vl) { +vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1535,8 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1545,8 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, - vbool64_t borrowin, size_t vl) { +vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1573,8 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1583,8 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, - vbool32_t borrowin, size_t vl) { +vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1611,8 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1621,8 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, - vbool16_t borrowin, size_t vl) { +vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1649,8 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1659,8 +1566,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, - vbool8_t borrowin, size_t vl) { +vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { return vmsbc(op1, op2, borrowin, vl); } @@ -1681,3 +1587,4 @@ vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsbc(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c @@ -9,57 +9,70 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b16( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b32( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); } +vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { + return vmsbf(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, - size_t vl) { +vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } @@ -68,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, - size_t vl) { +vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } @@ -78,8 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, - size_t vl) { +vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } @@ -88,8 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, - 
size_t vl) { +vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } @@ -98,8 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, - size_t vl) { +vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } @@ -108,8 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, - size_t vl) { +vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } @@ -118,7 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, - size_t vl) { +vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { return vmsbf(mask, maskedoff, op1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c @@ -135,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmseq(op1, op2, vl); } @@ -154,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmseq(op1, op2, vl); } @@ -245,8 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmseq(op1, op2, vl); } @@ -534,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmseq(op1, op2, vl); } @@ -553,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return 
vmseq(op1, op2, vl); } @@ -644,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmseq(op1, op2, vl); } @@ -807,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -817,8 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -827,8 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -837,8 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -847,8 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -857,8 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -867,8 +855,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -877,8 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -887,8 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -897,8 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -907,8 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -917,8 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -927,8 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -937,8 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t 
test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -947,9 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -958,8 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -968,9 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -979,8 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -989,8 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -999,8 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1009,8 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, 
vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1019,8 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1029,8 +999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1039,8 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1049,8 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1059,8 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1069,9 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1080,8 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return 
vmseq(mask, maskedoff, op1, op2, vl); } @@ -1090,8 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1100,8 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1110,8 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1120,8 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1130,8 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1140,8 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1150,8 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1160,8 +1116,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1170,8 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1180,8 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1190,8 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1200,8 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1210,8 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1220,8 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1230,8 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1240,8 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1250,9 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1261,8 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1271,9 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1282,8 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1292,9 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1303,8 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1313,8 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1323,8 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1333,8 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1343,8 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1353,8 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1363,8 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1373,8 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1383,8 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1393,9 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1404,9 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1415,9 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1426,9 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1437,9 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1448,8 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t 
test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1458,8 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1468,8 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1478,8 +1395,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1488,8 +1404,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1498,8 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1508,8 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1518,9 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, 
- vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1529,9 +1440,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1540,9 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1551,8 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1561,9 +1467,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1572,8 +1476,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1582,8 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1592,8 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - 
vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1602,8 +1503,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1612,8 +1512,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1622,9 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1633,8 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1643,9 +1539,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1654,8 +1548,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1664,9 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - 
size_t vl) { +vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1675,8 +1566,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1685,8 +1575,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } @@ -1695,7 +1584,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c @@ -9,7 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -36,7 +36,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -45,7 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool16_t test_vmsge_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -54,7 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -81,7 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -99,7 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -108,7 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -126,7 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -135,7 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmsge_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -144,7 +144,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -162,7 +162,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -171,7 +171,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -189,7 +189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -198,7 +198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -216,7 +216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -225,7 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -234,7 +234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -252,7 +252,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -261,7 +261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -279,7 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -288,7 +288,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -297,7 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -306,7 +306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) 
{ return vmsge(op1, op2, vl); } @@ -315,7 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -324,7 +324,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -342,7 +342,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -351,7 +351,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -369,7 +369,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -378,7 +378,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -387,7 +387,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -396,7 +396,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, 
size_t vl) { +vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { return vmsge(op1, op2, vl); } @@ -405,7 +405,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -414,7 +414,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -423,7 +423,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -432,7 +432,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -441,7 +441,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -459,7 +459,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -468,7 +468,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -477,7 +477,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -486,7 +486,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -495,7 +495,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -504,7 +504,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -513,7 +513,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -522,7 +522,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -531,7 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -549,7 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -558,7 +558,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -567,7 +567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsgeu(op1, 
op2, vl); } @@ -576,7 +576,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -585,7 +585,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -594,7 +594,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -603,7 +603,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -612,7 +612,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -621,7 +621,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -639,7 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -648,7 +648,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -657,7 +657,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t 
test_vmsgeu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -666,7 +666,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -675,7 +675,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -684,7 +684,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -693,7 +693,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -702,7 +702,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -711,7 +711,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -729,7 +729,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -738,7 +738,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -747,7 +747,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -756,7 +756,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -765,7 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -774,7 +774,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -792,7 +792,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsgeu(op1, op2, vl); } @@ -801,7 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -819,7 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t 
test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -828,7 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -837,7 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -846,7 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -855,7 +855,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -864,7 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -882,7 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, 
op2, vl); } @@ -891,7 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -909,7 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -918,7 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -927,7 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -936,7 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -945,7 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -954,7 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsge.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -972,7 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -981,7 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -999,7 +999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1008,7 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1017,7 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1026,7 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1035,7 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1044,7 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1062,7 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1071,7 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool16_t test_vmsge_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1089,7 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1098,7 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1107,7 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1116,7 +1116,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1125,7 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1134,7 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, 
vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1152,7 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1161,7 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1179,7 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1188,7 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmsge(mask, maskedoff, op1, op2, vl); } @@ -1197,7 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1206,7 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t 
test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1215,7 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1224,7 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1233,7 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1242,7 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1251,7 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1269,7 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t 
maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1278,7 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1287,7 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1296,7 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1305,7 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1314,7 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1323,7 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1332,7 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return 
vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1341,7 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1359,7 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1368,7 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1377,7 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1386,7 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1395,7 +1395,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return 
vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1404,7 +1404,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1413,7 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1422,7 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1431,7 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1440,7 +1440,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1449,7 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1458,7 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return 
vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1467,7 +1467,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1476,7 +1476,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1485,7 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1494,7 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1503,7 +1503,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1512,7 +1512,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1521,7 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsgeu(mask, maskedoff, 
op1, op2, vl); } @@ -1530,7 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1539,7 +1539,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1548,7 +1548,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1557,7 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1566,7 +1566,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1575,7 +1575,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } @@ -1584,7 +1584,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsgeu(mask, maskedoff, op1, op2, vl); } 
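A minimal usage sketch, not part of the patch, showing how the overloaded comparison intrinsics exercised in these tests compose: vmsgt(op1, op2, vl) is the unmasked overloaded form and vmsge(mask, maskedoff, op1, op2, vl) the masked one, exactly as in the test bodies above and below. The helper name combined_compare, the threshold parameter, and the build flags (e.g. clang --target=riscv64 -march=rv64gcv) are illustrative assumptions, not taken from the patch.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper: mask of lanes where op1 > threshold AND op1 >= op2.
// vmsgt(op1, threshold, vl) resolves to the vmsgt_vx_i32m2_b16 overload;
// vmsge(pos, pos, op1, op2, vl) resolves to the masked vmsge_vv_i32m2_b16_m
// overload. Passing `pos` as both mask and maskedoff leaves inactive lanes
// at 0, so the result is the AND of the two conditions.
vbool16_t combined_compare(vint32m2_t op1, vint32m2_t op2,
                           int32_t threshold, size_t vl) {
  vbool16_t pos = vmsgt(op1, threshold, vl); // op1 > threshold
  return vmsge(pos, pos, op1, op2, vl);      // op1 >= op2, only where pos is set
}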
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c @@ -9,7 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -36,7 +36,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -45,7 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -54,7 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -81,7 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgt.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -99,7 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -108,7 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -126,7 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -135,7 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -144,7 +144,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -162,7 +162,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -171,7 +171,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ 
-180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -189,7 +189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -198,7 +198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -216,7 +216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -225,7 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -234,7 +234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -252,7 +252,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -261,7 +261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t 
test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -279,7 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -288,7 +288,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -297,7 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -306,7 +306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -315,7 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -324,7 +324,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -342,7 +342,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -351,7 +351,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool32_t test_vmsgt_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -360,7 +360,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -369,7 +369,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -378,7 +378,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -387,7 +387,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -396,7 +396,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { return vmsgt(op1, op2, vl); } @@ -405,7 +405,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -414,7 +414,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -423,7 +423,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -432,7 +432,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -441,7 +441,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -450,7 +450,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -459,7 +459,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -468,7 +468,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -477,7 +477,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -486,7 +486,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -495,7 +495,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -504,7 +504,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -513,7 +513,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -522,7 +522,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { return 
vmsgtu(op1, op2, vl); } @@ -531,7 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -540,7 +540,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -549,7 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -558,7 +558,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -567,7 +567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -576,7 +576,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -585,7 +585,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -594,7 +594,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -603,7 +603,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -612,7 +612,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -621,7 +621,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -630,7 +630,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -639,7 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -648,7 +648,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -657,7 +657,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -666,7 +666,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -675,7 +675,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -684,7 +684,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -693,7 +693,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { 
return vmsgtu(op1, op2, vl); } @@ -702,7 +702,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -711,7 +711,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -720,7 +720,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -729,7 +729,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -738,7 +738,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -747,7 +747,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -756,7 +756,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -765,7 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -774,7 +774,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool8_t test_vmsgtu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -792,7 +792,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsgtu(op1, op2, vl); } @@ -801,7 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -819,7 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -828,7 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -837,7 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -846,7 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -855,7 +855,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -864,7 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -882,7 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -891,7 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -909,7 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -918,7 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -927,7 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -936,7 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -945,7 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -954,7 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -972,7 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -981,7 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsgt_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -999,7 +999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1008,7 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1017,7 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1026,7 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1035,7 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1044,7 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t 
op2, size_t vl) { +vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1062,7 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1071,7 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1089,7 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1098,7 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1107,7 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, 
vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1116,7 +1116,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1125,7 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1134,7 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1152,7 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1161,7 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { 
return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1179,7 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1188,7 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmsgt(mask, maskedoff, op1, op2, vl); } @@ -1197,7 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1206,7 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1215,7 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1224,7 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1233,7 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, 
vl); } @@ -1242,7 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1251,7 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1269,7 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1278,7 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1287,7 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1296,7 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1305,7 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1314,7 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1323,7 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1332,7 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1341,7 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1359,7 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1368,7 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1377,7 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1386,7 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1395,7 +1395,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1404,7 +1404,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1413,7 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1422,7 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1431,7 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1440,7 +1440,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1449,7 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1458,7 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1467,7 +1467,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1476,7 +1476,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1485,7 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1494,7 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1503,7 +1503,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1512,7 +1512,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1521,7 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1530,7 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1539,7 +1539,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1548,7 +1548,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1557,7 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1566,7 +1566,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1575,7 +1575,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } @@ -1584,7 +1584,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsgtu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c @@ -9,57 +9,70 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { return vmsif(op1, vl); } +vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { return vmsif(op1, vl); } +vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { return vmsif(op1, vl); } +vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { return vmsif(op1, vl); } +vbool8_t 
test_vmsif_m_b8(vbool8_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b16( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { return vmsif(op1, vl); } +vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b32( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { return vmsif(op1, vl); } +vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); } +vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { + return vmsif(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsif_m_b1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, - size_t vl) { +vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } @@ -68,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, - size_t vl) { +vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } @@ -78,8 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, - size_t vl) { +vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } @@ -88,8 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, - size_t vl) { +vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } @@ -98,8 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, - size_t vl) { +vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } @@ -108,8 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsif_m_b32_m(vbool32_t mask, 
vbool32_t maskedoff, vbool32_t op1, - size_t vl) { +vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } @@ -118,7 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, - size_t vl) { +vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { return vmsif(mask, maskedoff, op1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c @@ -135,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsle(op1, op2, vl); } @@ -154,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsle(op1, op2, vl); } @@ -245,8 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsle(op1, op2, vl); } @@ -408,8 +405,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -427,8 +423,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -446,8 +441,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -537,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -556,8 +549,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -575,8 +567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -648,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -667,8 +657,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -686,8 +675,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -741,8 +729,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -760,8 +747,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -779,8 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsleu(op1, op2, vl); } @@ -816,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -826,8 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -836,8 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -846,8 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -856,8 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -866,8 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -876,8 +855,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -886,8 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -896,8 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -906,8 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -916,8 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -926,8 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -936,8 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -946,8 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -956,9 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -967,8 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t 
op1, int16_t op2, size_t vl) { +vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -977,9 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -988,8 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -998,8 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1008,8 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1018,8 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1028,8 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1038,8 +999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t 
test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1048,8 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1058,8 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1068,8 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1078,9 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1089,8 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1099,8 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1109,8 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t 
maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1119,8 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1129,8 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1139,8 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1149,8 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1159,8 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1169,8 +1116,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1179,8 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmsle(mask, 
maskedoff, op1, op2, vl); } @@ -1189,8 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1199,8 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1209,8 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1219,8 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1229,8 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1239,8 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1249,8 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmsle(mask, maskedoff, op1, op2, vl); } @@ -1259,9 +1197,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1270,8 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1280,9 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1291,8 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1301,9 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1312,8 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1322,8 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1332,8 +1260,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1342,8 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1352,8 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1362,8 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1372,8 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1382,8 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1392,8 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1402,9 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1413,9 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1424,9 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1435,9 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1446,9 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1457,8 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1467,9 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1478,8 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1488,9 +1395,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1499,8 +1404,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1509,9 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1520,8 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1530,9 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1541,9 +1440,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1552,9 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1563,8 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1573,9 +1467,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1584,8 +1476,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1594,9 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1605,8 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1615,9 +1503,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1626,8 +1512,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1636,9 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1647,8 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1657,9 +1539,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1668,8 +1548,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1678,9 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1689,8 +1566,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1699,9 +1575,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } @@ -1710,7 +1584,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c @@ -135,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmslt(op1, op2, vl); } @@ -154,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmslt(op1, op2, vl); } @@ -245,8 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmslt(op1, op2, vl); } @@ -408,8 +405,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -427,8 +423,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -446,8 +441,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -537,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -556,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -575,8 +567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -648,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -667,8 +657,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -686,8 +675,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -741,8 +729,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -760,8 +747,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -779,8 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsltu(op1, op2, vl); } @@ -816,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -826,8 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -836,8 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -846,8 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -856,8 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -866,8 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -876,8 +855,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -886,8 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t 
op1, int8_t op2, size_t vl) { +vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -896,8 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -906,8 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -916,8 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -926,8 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -936,8 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -946,8 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -956,9 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { 
return vmslt(mask, maskedoff, op1, op2, vl); } @@ -967,8 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -977,9 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -988,8 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -998,8 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1008,8 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1018,8 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1028,8 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1038,8 
+999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1048,8 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1058,8 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1068,8 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1078,9 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1089,8 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1099,8 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1109,8 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1119,8 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1129,8 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1139,8 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1149,8 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1159,8 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1169,8 +1116,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1179,8 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1189,8 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1199,8 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1209,8 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1219,8 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1229,8 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1239,8 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1249,8 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmslt(mask, maskedoff, op1, op2, vl); } @@ -1259,9 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1270,8 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1280,9 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1291,8 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1301,9 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1312,8 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1322,8 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1332,8 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1342,8 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1352,8 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1362,8 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1372,8 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1382,8 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1392,8 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t 
op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1402,9 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1413,9 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1424,9 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1435,9 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1446,9 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1457,8 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1467,9 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t 
maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1478,8 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1488,9 +1395,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1499,8 +1404,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1509,9 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1520,8 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1530,9 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1541,9 +1440,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, 
- vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1552,9 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1563,8 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1573,9 +1467,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1584,8 +1476,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1594,9 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1605,8 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1615,9 +1503,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - 
vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1626,8 +1512,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1636,9 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1647,8 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1657,9 +1539,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1668,8 +1548,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1678,9 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1689,8 +1566,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t 
maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1699,9 +1575,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } @@ -1710,7 +1584,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c @@ -135,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsne(op1, op2, vl); } @@ -154,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsne(op1, op2, vl); } @@ -245,8 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsne(op1, op2, vl); } @@ -534,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsne(op1, op2, vl); } @@ -553,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsne(op1, op2, vl); } @@ -644,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsne(op1, op2, vl); } @@ -807,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -817,8 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -827,8 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -837,8 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -847,8 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -857,8 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -867,8 +855,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, 
vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -877,8 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -887,8 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -897,8 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -907,8 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -917,8 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -927,8 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -937,8 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -947,9 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -958,8 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -968,9 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -979,8 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -989,8 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -999,8 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1009,8 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1019,8 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1029,8 +999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1039,8 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1049,8 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1059,8 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1069,9 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1080,8 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1090,8 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1100,8 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1110,8 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1120,8 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1130,8 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1140,8 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1150,8 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1160,8 +1116,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t 
test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1170,8 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1180,8 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1190,8 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1200,8 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1210,8 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1220,8 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1230,8 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t 
maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1240,8 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1250,9 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1261,8 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1271,9 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1282,8 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1292,9 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1303,8 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t 
vl) { +vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1313,8 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1323,8 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1333,8 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1343,8 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1353,8 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1363,8 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1373,8 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { 
return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1383,8 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1393,9 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1404,9 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1415,9 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1426,9 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1437,9 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1448,8 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return 
vmsne(mask, maskedoff, op1, op2, vl); } @@ -1458,8 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1468,8 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1478,8 +1395,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1488,8 +1404,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1498,8 +1413,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1508,8 +1422,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1518,9 +1431,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1529,9 
+1440,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1540,9 +1449,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1551,8 +1458,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1561,9 +1467,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1572,8 +1476,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1582,8 +1485,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1592,8 +1494,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1602,8 +1503,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1612,8 +1512,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1622,9 +1521,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1633,8 +1530,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1643,9 +1539,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1654,8 +1548,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1664,9 +1557,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1675,8 +1566,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1685,8 +1575,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } @@ -1695,7 +1584,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c @@ -9,57 +9,70 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { return vmsof(op1, vl); } +vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { return vmsof(op1, vl); } +vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { return vmsof(op1, vl); } +vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { return vmsof(op1, vl); } +vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b16( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { return vmsof(op1, vl); } +vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b32( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { return vmsof(op1, vl); } +vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); } +vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { + return vmsof(op1, vl); +} // CHECK-RV64-LABEL: @test_vmsof_m_b1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, - size_t vl) { +vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } @@ -68,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, - size_t vl) { +vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } @@ -78,8 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, - size_t vl) { +vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } @@ -88,8 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, - size_t vl) { +vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } @@ -98,8 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, - size_t vl) { +vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } @@ -108,8 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, - size_t vl) { +vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } @@ -118,7 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, - size_t vl) { +vbool64_t 
test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { return vmsof(mask, maskedoff, op1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -1990,7 +1990,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( @@ -1999,7 +1999,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( @@ -2008,7 +2008,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( @@ -2017,7 +2017,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( @@ -2026,7 +2026,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( @@ -2035,7 +2035,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( @@ -2044,7 +2044,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( @@ -2053,7 +2053,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( @@ -2062,7 +2062,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( @@ -2071,7 +2071,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return 
vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( @@ -2080,7 +2080,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( @@ -2089,7 +2089,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( @@ -2098,7 +2098,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( @@ -2107,7 +2107,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( @@ -2116,7 +2116,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( @@ -2125,7 +2125,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( @@ -2134,7 +2134,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( @@ -2143,7 +2143,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( @@ -2152,7 +2152,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( @@ -2161,7 +2161,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( @@ -2170,7 +2170,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul(mask, maskedoff, 
op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( @@ -2179,7 +2179,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( @@ -2188,7 +2188,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( @@ -2197,7 +2197,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( @@ -2206,7 +2206,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( @@ -2215,7 +2215,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( @@ -2224,7 +2224,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( @@ -2233,7 +2233,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( @@ -2242,7 +2242,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( @@ -2251,7 +2251,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( @@ -2260,7 +2260,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( @@ -2269,7 +2269,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, 
int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( @@ -2278,7 +2278,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( @@ -2287,7 +2287,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( @@ -2296,7 +2296,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( @@ -2305,7 +2305,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( @@ -2314,7 +2314,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( @@ -2323,7 +2323,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( @@ -2332,7 +2332,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( @@ -2341,7 +2341,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( @@ -2350,7 +2350,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( @@ -2359,7 +2359,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( @@ -2368,7 +2368,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_m(vbool8_t 
mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( @@ -2377,7 +2377,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( @@ -2386,7 +2386,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( @@ -2395,7 +2395,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( @@ -2404,7 +2404,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( @@ -2413,7 +2413,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( @@ -2422,7 +2422,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( @@ -2431,7 +2431,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( @@ -2440,7 +2440,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( @@ -2449,7 +2449,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( @@ -2458,7 +2458,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( @@ -2467,7 +2467,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( @@ -2476,7 +2476,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( @@ -2485,7 +2485,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( @@ -2494,7 +2494,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( @@ -2503,7 +2503,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( @@ -2512,7 +2512,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( @@ -2521,7 +2521,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( @@ -2530,7 +2530,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( @@ -2539,7 +2539,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( @@ -2548,7 +2548,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( @@ -2557,7 +2557,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( @@ -2566,7 +2566,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( @@ -2575,7 +2575,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( @@ -2584,7 +2584,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( @@ -2593,7 +2593,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( @@ -2602,7 +2602,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( @@ -2611,7 +2611,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( @@ -2620,7 +2620,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( @@ -2629,7 +2629,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( @@ -2638,7 +2638,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( @@ -2647,7 +2647,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( @@ -2656,7 +2656,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - 
return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( @@ -2665,7 +2665,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( @@ -2674,7 +2674,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( @@ -2683,7 +2683,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( @@ -2692,7 +2692,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( @@ -2701,7 +2701,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( @@ -2710,7 +2710,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( @@ -2719,7 +2719,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( @@ -2728,7 +2728,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( @@ -2737,7 +2737,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( @@ -2746,7 +2746,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( @@ -2755,7 +2755,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( @@ -2764,7 +2764,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( @@ -2773,7 +2773,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( @@ -2782,7 +2782,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( @@ -2791,7 +2791,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( @@ -2800,7 +2800,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( @@ -2809,7 +2809,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( @@ -2818,7 +2818,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( @@ -2827,7 +2827,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( @@ -2836,7 +2836,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( @@ -2845,7 +2845,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulh_vv_i8m2_m( @@ -2854,7 +2854,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( @@ -2863,7 +2863,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( @@ -2872,7 +2872,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( @@ -2881,7 +2881,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( @@ -2890,7 +2890,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( @@ -2899,7 +2899,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( @@ -2908,7 +2908,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( @@ -2917,7 +2917,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( @@ -2926,7 +2926,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( @@ -2935,7 +2935,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( @@ -2944,7 +2944,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( @@ -2953,7 +2953,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( @@ -2962,7 +2962,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( @@ -2971,7 +2971,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( @@ -2980,7 +2980,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( @@ -2989,7 +2989,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( @@ -2998,7 +2998,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( @@ -3007,7 +3007,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( @@ -3016,7 +3016,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( @@ -3025,7 +3025,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( @@ -3034,7 +3034,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( @@ -3043,7 +3043,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t 
maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( @@ -3052,7 +3052,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( @@ -3061,7 +3061,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( @@ -3070,7 +3070,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( @@ -3079,7 +3079,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( @@ -3088,7 +3088,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( @@ -3097,7 +3097,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( @@ -3106,7 +3106,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( @@ -3115,7 +3115,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( @@ -3124,7 +3124,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( @@ -3133,7 +3133,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( @@ -3142,7 +3142,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( @@ -3151,7 +3151,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( @@ -3160,7 +3160,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( @@ -3169,7 +3169,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulh(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( @@ -3178,7 +3178,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( @@ -3187,7 +3187,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( @@ -3196,7 +3196,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( @@ -3205,7 +3205,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( @@ -3214,7 +3214,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( @@ -3223,7 +3223,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( @@ -3232,7 +3232,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( @@ -3241,7 +3241,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( @@ -3250,7 +3250,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( @@ -3259,7 +3259,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( @@ -3268,7 +3268,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( @@ -3277,7 +3277,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( @@ -3286,7 +3286,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( @@ -3295,7 +3295,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( @@ -3304,7 +3304,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( @@ -3313,7 +3313,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( @@ -3322,7 +3322,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( @@ -3331,7 +3331,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( @@ -3340,7 +3340,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( @@ -3349,7 +3349,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( @@ -3358,7 +3358,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( @@ -3367,7 +3367,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( @@ -3376,7 +3376,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( @@ -3385,7 +3385,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( @@ -3394,7 +3394,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( @@ -3403,7 +3403,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( @@ -3412,7 +3412,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( @@ -3421,7 +3421,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( @@ -3430,7 +3430,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( @@ -3439,7 +3439,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( @@ -3448,7 +3448,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( @@ -3457,7 +3457,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( @@ -3466,7 +3466,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( @@ -3475,7 +3475,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( @@ -3484,7 +3484,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( @@ -3493,7 +3493,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( @@ -3502,7 +3502,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( @@ -3511,7 +3511,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( @@ -3520,7 +3520,7 @@ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( @@ -3529,7 +3529,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( @@ -3538,7 +3538,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( @@ -3547,7 +3547,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( @@ -3556,7 +3556,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( @@ -3565,7 +3565,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( @@ -3574,7 +3574,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( @@ -3583,7 +3583,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( @@ -3592,7 +3592,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( @@ -3601,7 +3601,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( @@ -3610,7 +3610,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( @@ -3619,7 +3619,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( @@ -3628,7 +3628,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( @@ -3637,7 +3637,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( @@ -3646,7 +3646,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( @@ -3655,7 +3655,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( @@ -3664,7 +3664,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( @@ -3673,7 +3673,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( @@ -3682,7 +3682,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( @@ -3691,7 +3691,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( @@ -3700,7 +3700,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( @@ -3709,7 +3709,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( @@ -3718,7 +3718,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( @@ -3727,7 +3727,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( @@ -3736,7 +3736,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( @@ -3745,7 +3745,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( @@ -3754,7 +3754,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( @@ -3763,7 +3763,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( @@ -3772,7 +3772,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( @@ -3781,7 +3781,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( @@ -3790,7 +3790,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( @@ -3799,7 +3799,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( @@ -3808,7 +3808,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( @@ -3817,7 +3817,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( @@ -3826,7 +3826,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( @@ -3835,7 +3835,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( @@ -3844,7 +3844,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( @@ -3853,7 +3853,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( @@ -3862,7 +3862,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( @@ -3871,7 +3871,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( @@ -3880,7 +3880,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( @@ -3889,7 +3889,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( 
@@ -3898,7 +3898,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( @@ -3907,7 +3907,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( @@ -3916,7 +3916,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( @@ -3925,7 +3925,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( @@ -3934,7 +3934,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( @@ -3943,7 +3943,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( @@ -3952,7 +3952,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( @@ -3961,5 +3961,1986 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + 
return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmul(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vmul(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return 
vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t 
test_vmul_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vmul_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vmulh_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, 
vint64m4_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m4_t test_vmulhu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vmulhu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t 
vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m4_t test_vmulhsu_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + 
return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, 
vint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c @@ -66,3 +66,4 @@ vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { return vmxnor(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c @@ -66,3 +66,4 @@ vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { return vmxor(op1, op2, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c @@ -117,8 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, - size_t vl) { +vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { return vnclip(op1, shift, vl); } @@ -136,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, - size_t vl) { +vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { return vnclip(op1, shift, vl); } @@ -209,8 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, - size_t vl) { +vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { return vnclip(op1, shift, vl); } @@ -282,8 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, - size_t vl) { +vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -301,8 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, - size_t vl) { +vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -320,8 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, - size_t vl) { +vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -393,8 +387,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, - size_t vl) { +vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -412,8 +405,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, - size_t vl) { +vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -431,8 +423,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, - size_t vl) { +vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -450,8 +441,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, - size_t vl) { +vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -469,8 +459,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, - size_t vl) { +vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -488,8 +477,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, - size_t vl) { +vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -507,8 +495,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, - size_t vl) { +vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -526,8 +513,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, - size_t vl) { +vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -545,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, - size_t vl) { +vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { return vnclipu(op1, shift, vl); } @@ -564,10 +549,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint16mf4_t op1, vuint8mf8_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( @@ -575,9 +558,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( @@ -585,10 +567,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint16mf2_t op1, vuint8mf4_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( @@ -596,9 +576,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( @@ -606,10 +585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint16m1_t op1, vuint8mf2_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( @@ -617,9 +594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint16m1_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( @@ -627,9 +603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( @@ -637,9 +612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint16m2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( @@ -647,9 +621,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( @@ -657,9 +630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint16m4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( @@ -667,9 +639,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( @@ -677,9 +648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint16m8_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( @@ -687,10 +657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint32mf2_t op1, vuint16mf4_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( @@ -698,9 +666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( @@ -708,10 +675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - 
vint32m1_t op1, vuint16mf2_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( @@ -719,9 +684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint32m1_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( @@ -729,10 +693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint32m2_t op1, vuint16m1_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( @@ -740,9 +702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint32m2_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( @@ -750,10 +711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint32m4_t op1, vuint16m2_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( @@ -761,9 +720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint32m4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( @@ -771,10 +729,8 
@@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint32m8_t op1, vuint16m4_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( @@ -782,9 +738,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint32m8_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( @@ -792,10 +747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint64m1_t op1, vuint32mf2_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( @@ -803,9 +756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint64m1_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( @@ -813,10 +765,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint64m2_t op1, vuint32m1_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( @@ -824,9 +774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint64m2_t op1, size_t shift, size_t vl) { 
- return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( @@ -834,10 +783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint64m4_t op1, vuint32m2_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( @@ -845,9 +792,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint64m4_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( @@ -855,10 +801,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint64m8_t op1, vuint32m4_t shift, - size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( @@ -866,9 +810,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint64m8_t op1, size_t shift, size_t vl) { - return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( @@ -876,10 +819,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint16mf4_t op1, vuint8mf8_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( @@ -887,9 +828,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( @@ -897,10 +837,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint16mf2_t op1, vuint8mf4_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( @@ -908,9 +846,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( @@ -918,10 +855,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint16m1_t op1, vuint8mf2_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( @@ -929,9 +864,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( @@ -939,10 +873,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint16m2_t op1, vuint8m1_t shift, - size_t vl) { - return 
vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( @@ -950,9 +882,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( @@ -960,10 +891,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint16m4_t op1, vuint8m2_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( @@ -971,9 +900,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( @@ -981,10 +909,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint16m8_t op1, vuint8m4_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( @@ -992,9 +918,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( @@ -1002,10 +927,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint32mf2_t op1, vuint16mf4_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( @@ -1013,10 +936,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint32mf2_t op1, size_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( @@ -1024,10 +945,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint32m1_t op1, vuint16mf2_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( @@ -1035,10 +954,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint32m1_t op1, size_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( @@ -1046,10 +963,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint32m2_t op1, vuint16m1_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( @@ -1057,9 +972,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t 
maskedoff, - vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( @@ -1067,10 +981,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint32m4_t op1, vuint16m2_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( @@ -1078,9 +990,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( @@ -1088,10 +999,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint32m8_t op1, vuint16m4_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( @@ -1099,9 +1008,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( @@ -1109,10 +1017,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint64m1_t op1, vuint32mf2_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu(mask, 
maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( @@ -1120,10 +1026,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint64m1_t op1, size_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( @@ -1131,10 +1035,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint64m2_t op1, vuint32m1_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( @@ -1142,9 +1044,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( @@ -1152,10 +1053,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint64m4_t op1, vuint32m2_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( @@ -1163,9 +1062,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( @@ -1173,10 +1071,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint64m8_t op1, vuint32m4_t shift, - size_t vl) { - return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( @@ -1184,7 +1080,547 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint64m8_t op1, size_t shift, size_t vl) { +vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t 
test_vnclip_wv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl, size_t ta) { return 
vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, 
op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c @@ -9,7 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { +vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { +vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( 
[[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { +vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) { return vncvt_x(src, vl); } @@ -36,7 +36,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { +vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -45,7 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { +vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -54,7 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { +vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) { return vncvt_x(src, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { +vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { +vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -81,7 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { +vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) { return vncvt_x(src, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { +vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -99,7 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { +vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -108,7 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { +vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) { return vncvt_x(src, vl); } @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { +vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -126,7 +126,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { +vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) { return vncvt_x(src, vl); } @@ -135,7 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { +vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -144,7 +144,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { +vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { +vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) { return vncvt_x(src, vl); } @@ -162,7 +162,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { +vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -171,7 +171,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { +vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) { return vncvt_x(src, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { +vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -189,7 +189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { +vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -198,7 +198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { +vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) { return vncvt_x(src, vl); } @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { +vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) { return vncvt_x(src, vl); } @@ -216,7 +216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t 
src, size_t vl) { +vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -225,7 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { +vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -234,7 +234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { +vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) { return vncvt_x(src, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { +vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) { return vncvt_x(src, vl); } @@ -252,7 +252,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { +vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) { return vncvt_x(src, vl); } @@ -261,7 +261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { +vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) { return vncvt_x(src, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) { +vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) { return vncvt_x(src, vl); } @@ -279,8 +279,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vncvt_x_x_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m( @@ -288,8 +288,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vncvt_x_x_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m( @@ -297,8 +297,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vncvt_x_x_w_i8mf2_m (vbool16_t 
mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m( @@ -306,8 +306,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vncvt_x_x_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m( @@ -315,8 +315,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vncvt_x_x_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m( @@ -324,8 +324,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vncvt_x_x_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m( @@ -333,8 +333,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vncvt_x_x_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m( @@ -342,8 +342,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vncvt_x_x_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m( @@ -351,8 +351,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vncvt_x_x_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) 
{ - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m( @@ -360,8 +360,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vncvt_x_x_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m( @@ -369,8 +369,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vncvt_x_x_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m( @@ -378,8 +378,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vncvt_x_x_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m( @@ -387,8 +387,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vncvt_x_x_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m( @@ -396,8 +396,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vncvt_x_x_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m( @@ -405,8 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vncvt_x_x_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x(mask, 
maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m( @@ -414,8 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vncvt_x_x_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m( @@ -423,8 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vncvt_x_x_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m( @@ -432,8 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vncvt_x_x_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m( @@ -441,8 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vncvt_x_x_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m( @@ -450,8 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vncvt_x_x_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m( @@ -459,8 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vncvt_x_x_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, 
src, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m( @@ -468,8 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vncvt_x_x_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m( @@ -477,8 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vncvt_x_x_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m( @@ -486,8 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vncvt_x_x_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m( @@ -495,8 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vncvt_x_x_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m( @@ -504,8 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vncvt_x_x_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m( @@ -513,8 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vncvt_x_x_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, 
VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m( @@ -522,8 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vncvt_x_x_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m( @@ -531,8 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vncvt_x_x_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m( @@ -540,7 +540,277 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { +vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { + return vncvt_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vncvt_x_x_w_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vncvt_x_x_w_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vncvt_x_x_w_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint8m1_t test_vncvt_x_x_w_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vncvt_x_x_w_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vncvt_x_x_w_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vncvt_x_x_w_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vncvt_x_x_w_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vncvt_x_x_w_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vncvt_x_x_w_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vncvt_x_x_w_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vncvt_x_x_w_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vncvt_x_x_w_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vncvt_x_x_w_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vncvt_x_x_w_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vncvt_x_x_w_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vncvt_x_x_w_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl, size_t ta) { + 
return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vncvt_x_x_w_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vncvt_x_x_w_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vncvt_x_x_w_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vncvt_x_x_w_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vncvt_x_x_w_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vncvt_x_x_w_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vncvt_x_x_w_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vncvt_x_x_w_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vncvt_x_x_w_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vncvt_x_x_w_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl, size_t ta) { + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vncvt_x_x_w_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl, size_t ta) { return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { +vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) { return vneg(op1, vl); } @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { +vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) { return vneg(op1, vl); } @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { +vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) { return vneg(op1, vl); } @@ -37,7 +37,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { +vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) { return vneg(op1, vl); } @@ -46,7 +46,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { +vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) { return vneg(op1, vl); } @@ -55,7 +55,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { +vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) { return vneg(op1, vl); } @@ -64,7 +64,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { +vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) { return vneg(op1, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { +vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) { return vneg(op1, vl); } @@ -82,7 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { +vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) { return vneg(op1, vl); } @@ -91,7 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { +vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) { return vneg(op1, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { +vint16m2_t test_vneg_v_i16m2(vint16m2_t op1, size_t vl) { return vneg(op1, vl); } @@ -109,7 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { +vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) { return vneg(op1, vl); } @@ -118,7 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { +vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) { return vneg(op1, vl); } @@ -127,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { +vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) { return vneg(op1, vl); } @@ -136,7 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { +vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) { return vneg(op1, vl); } @@ -145,7 +145,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { +vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) { return vneg(op1, vl); } @@ -154,7 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { +vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t 
vl) { return vneg(op1, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { +vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) { return vneg(op1, vl); } @@ -172,7 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { +vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) { return vneg(op1, vl); } @@ -181,7 +181,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { +vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) { return vneg(op1, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { +vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) { return vneg(op1, vl); } @@ -199,7 +199,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { +vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) { return vneg(op1, vl); } @@ -208,8 +208,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m( @@ -217,8 +217,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vneg_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m( @@ -226,8 +226,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vneg_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m( @@ -235,8 +235,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vneg_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return 
vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m( @@ -244,8 +244,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vneg_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_m( @@ -253,8 +253,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vneg_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_m( @@ -262,8 +262,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vneg_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m( @@ -271,8 +271,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vneg_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m( @@ -280,8 +280,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vneg_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_m( @@ -289,8 +289,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vneg_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_m( @@ -298,8 
+298,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vneg_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_m( @@ -307,8 +307,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vneg_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_m( @@ -316,8 +316,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vneg_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m( @@ -325,8 +325,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vneg_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_m( @@ -334,8 +334,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vneg_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_m( @@ -343,8 +343,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vneg_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_m( @@ -352,8 +352,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t 
test_vneg_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_m( @@ -361,8 +361,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vneg_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_m( @@ -370,8 +370,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vneg_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_m( @@ -379,8 +379,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vneg_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_m( @@ -388,8 +388,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m( @@ -397,7 +397,205 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { +vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { + return vneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vneg_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vneg_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vneg_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vneg_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vneg_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vneg_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vneg_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vneg_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vneg_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vneg_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vneg_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vneg_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vneg_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vneg_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vneg_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vneg_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vneg_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vneg_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vneg_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vneg_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl, size_t ta) { + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vneg_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl, size_t ta) { return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c @@ -10,7 +10,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { +vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) { return vnot(op1, vl); } @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { +vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) { return vnot(op1, vl); } @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { +vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) { return vnot(op1, vl); } @@ -37,7 +37,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { +vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) { return 
vnot(op1, vl); } @@ -46,7 +46,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { +vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -55,7 +55,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { +vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -64,7 +64,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { +vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { +vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) { return vnot(op1, vl); } @@ -82,7 +82,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { +vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) { return vnot(op1, vl); } @@ -91,7 +91,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { +vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -100,7 +100,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { +vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -109,7 +109,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { +vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -118,7 +118,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { +vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -127,7 +127,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { +vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) { return vnot(op1, vl); } @@ -136,7 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { +vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -145,7 +145,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { +vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -154,7 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { +vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { +vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -172,7 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { +vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -181,7 +181,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { +vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -190,7 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { +vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -199,7 +199,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { +vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -208,7 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { +vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) { return vnot(op1, vl); } @@ -217,7 +217,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { +vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) { return vnot(op1, vl); } @@ -226,7 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { +vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) { return vnot(op1, vl); } @@ -235,7 +235,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { +vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -244,7 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { +vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) 
{ return vnot(op1, vl); } @@ -253,7 +253,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { +vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -262,7 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { +vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -271,7 +271,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { +vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) { return vnot(op1, vl); } @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { +vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) { return vnot(op1, vl); } @@ -289,7 +289,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { +vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -298,7 +298,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { +vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -307,7 +307,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { +vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -316,7 +316,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { +vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -325,7 +325,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { +vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) { return vnot(op1, vl); } @@ -334,7 +334,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { +vuint32m1_t test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { +vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -352,7 +352,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { +vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -361,7 +361,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { +vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { +vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) { return vnot(op1, vl); } @@ -379,7 +379,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { +vuint64m2_t test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) { return vnot(op1, vl); } @@ -388,7 +388,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { +vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) { return vnot(op1, vl); } @@ -397,7 +397,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { +vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) { return vnot(op1, vl); } @@ -406,8 +406,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnot_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m( @@ -415,8 +415,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnot_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m( @@ -424,8 +424,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnot_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_m( @@ -433,8 +433,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnot_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_m( @@ -442,8 +442,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnot_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_m( @@ -451,8 +451,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnot_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_m( @@ -460,8 +460,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vnot_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m( @@ -469,8 +469,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnot_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m( @@ -478,8 +478,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnot_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_m( @@ -487,8 +487,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnot_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { 
- return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_m( @@ -496,8 +496,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnot_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_m( @@ -505,8 +505,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnot_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_m( @@ -514,8 +514,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vnot_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m( @@ -523,8 +523,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnot_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_m( @@ -532,8 +532,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnot_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_m( @@ -541,8 +541,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnot_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vnot_v_i32m4_m( @@ -550,8 +550,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnot_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_m( @@ -559,8 +559,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vnot_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_m( @@ -568,8 +568,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vnot_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_m( @@ -577,8 +577,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vnot_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_m( @@ -586,8 +586,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vnot_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_m( @@ -595,8 +595,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vnot_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m( @@ -604,8 +604,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnot_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m( @@ -613,8 +613,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnot_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m( @@ -622,8 +622,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnot_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_m( @@ -631,8 +631,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnot_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_m( @@ -640,8 +640,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnot_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_m( @@ -649,8 +649,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnot_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_m( @@ -658,8 +658,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vnot_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vnot_v_u8m8_m(vbool1_t 
mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m( @@ -667,8 +667,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnot_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m( @@ -676,8 +676,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnot_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_m( @@ -685,8 +685,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnot_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_m( @@ -694,8 +694,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnot_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_m( @@ -703,8 +703,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnot_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_m( @@ -712,8 +712,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vnot_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m( @@ -721,8 +721,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnot_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_m( @@ -730,8 +730,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnot_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_m( @@ -739,8 +739,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnot_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_m( @@ -748,8 +748,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnot_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_m( @@ -757,8 +757,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vnot_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_m( @@ -766,8 +766,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vnot_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_m( @@ -775,8 +775,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint64m2_t test_vnot_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_m( @@ -784,8 +784,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m( @@ -793,7 +793,403 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { +vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { + return vnot(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnot_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnot_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnot_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnot_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnot_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnot_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnot_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnot_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnot_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnot_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnot_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnot_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnot_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnot_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnot_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnot_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnot_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnot_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnot_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnot_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnot_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnot_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnot_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnot_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnot_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnot_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnot_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnot_v_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vnot_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnot_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnot_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnot_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnot_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnot_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnot_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnot_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl, size_t ta) { + return vnot(mask, maskedoff, op1, vl, 
VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vnot_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl, size_t ta) {
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vnot_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl, size_t ta) {
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vnot_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl, size_t ta) {
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vnot_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl, size_t ta) {
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vnot_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl, size_t ta) {
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vnot_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl, size_t ta) {
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vnot_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl, size_t ta) {
 return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -280,7 +280,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1,
vuint8mf8_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m( @@ -289,7 +289,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m( @@ -298,7 +298,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m( @@ -307,7 +307,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m( @@ -316,7 +316,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( @@ -325,7 +325,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( @@ -334,7 +334,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( @@ -352,7 +352,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( @@ -361,7 +361,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( @@ -379,7 +379,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( @@ -388,7 +388,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( @@ -397,7 +397,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( @@ -406,7 +406,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m( @@ -415,7 +415,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m( @@ -424,7 +424,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m( @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m( @@ -442,7 +442,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m( @@ -451,7 +451,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m( @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m( @@ -469,7 +469,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, 
shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( @@ -478,7 +478,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( @@ -487,7 +487,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( @@ -496,7 +496,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( @@ -505,7 +505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( @@ -514,7 +514,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m( @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m( @@ -532,7 +532,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m( @@ -541,5 +541,276 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vnsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnsra_wv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnsra_wx_i8mf8_mt(vbool64_t mask, vint8mf8_t 
maskedoff, vint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnsra_wv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnsra_wx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnsra_wv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnsra_wx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnsra_wv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnsra_wx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnsra_wv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); 
+} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnsra_wx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnsra_wv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl, size_t ta) { return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnsra_wx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnsra_wv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnsra_wx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnsra_wv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnsra_wx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnsra_wv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnsra_wx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnsra_wv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnsra_wx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnsra_wv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnsra_wx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnsra_wv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnsra_wx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnsra_wv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnsra_wx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnsra_wv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnsra_wx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c @@ -280,7 +280,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m( @@ -289,7 +289,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m( @@ -298,7 +298,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m( @@ -307,7 +307,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m( @@ -316,7 +316,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m( @@ -325,7 +325,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m( @@ -334,7 +334,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m( @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m( @@ -352,7 +352,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m( @@ -361,7 +361,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m( @@ -370,7 +370,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + 
return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m( @@ -379,7 +379,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m( @@ -388,7 +388,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m( @@ -397,7 +397,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m( @@ -406,7 +406,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m( @@ -415,7 +415,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m( @@ -424,7 +424,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m( @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m( @@ -442,7 +442,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m( @@ -451,7 +451,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m( @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m( @@ -469,7 +469,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m( @@ -478,7 +478,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m( @@ -487,7 +487,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m( @@ -496,7 +496,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m( @@ -505,7 +505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m( @@ -514,7 +514,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m( @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m( @@ -532,7 +532,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vnsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m( @@ -541,5 +541,276 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vnsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnsrl_wv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnsrl_wx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnsrl_wv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnsrl_wx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnsrl_wv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnsrl_wx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnsrl_wv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnsrl_wx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint8m2_t test_vnsrl_wv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnsrl_wx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnsrl_wv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnsrl_wx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnsrl_wv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnsrl_wx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnsrl_wv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnsrl_wx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff,
vuint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnsrl_wv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnsrl_wx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnsrl_wv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnsrl_wx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnsrl_wv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnsrl_wx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vnsrl(mask, 
maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnsrl_wv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnsrl_wx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnsrl_wv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnsrl_wx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnsrl_wv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnsrl_wx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, 
int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, 
vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, 
vint64m8_t op1, int64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t 
maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( @@ -1486,7 
+1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vor(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL:
@test_vor_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t 
maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
int32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vor(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vor_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} 
+ +// CHECK-RV64-LABEL: @test_vor_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c @@ -129,3 +129,4 @@ unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { return vpopc(mask, op1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone 
-emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -213,6 +213,222 @@ return vreinterpret_i16m8(src); } +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4(vfloat16mf4_t src) { + return vreinterpret_i16mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2(vfloat16mf2_t src) { + return vreinterpret_i16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) { + return vreinterpret_i16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vreinterpret_v_f16m2_i16m2(vfloat16m2_t src) { + return vreinterpret_i16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_f16m4_i16m4(vfloat16m4_t src) { + return vreinterpret_i16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_f16m8_i16m8(vfloat16m8_t src) { + return vreinterpret_i16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4(vfloat16mf4_t src) { + return vreinterpret_u16mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2(vfloat16mf2_t src) { + return vreinterpret_u16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) { + return vreinterpret_u16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_f16m2_u16m2(vfloat16m2_t src) { + return vreinterpret_u16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vreinterpret_v_f16m4_u16m4(vfloat16m4_t src) { + return vreinterpret_u16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vreinterpret_v_f16m8_u16m8(vfloat16m8_t src) { + return vreinterpret_u16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4(vint16mf4_t src) { + return vreinterpret_f16mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2(vint16mf2_t src) { + return vreinterpret_f16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) { + return vreinterpret_f16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vreinterpret_v_i16m2_f16m2(vint16m2_t src) { + return vreinterpret_f16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vreinterpret_v_i16m4_f16m4(vint16m4_t src) { + return vreinterpret_f16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vreinterpret_v_i16m8_f16m8(vint16m8_t src) { + return vreinterpret_f16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4(vuint16mf4_t src) { + return vreinterpret_f16mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2(vuint16mf2_t src) { + return vreinterpret_f16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) { + return vreinterpret_f16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vreinterpret_v_u16m2_f16m2(vuint16m2_t src) { + return vreinterpret_f16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vreinterpret_v_u16m4_f16m4(vuint16m4_t src) { + return vreinterpret_f16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t 
test_vreinterpret_v_u16m8_f16m8(vuint16m8_t src) { + return vreinterpret_f16m8(src); +} + // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: ret [[SRC:%.*]] @@ -1688,3 +1904,4 @@ vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) { return vreinterpret_u32m8(src); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrem_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrem(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, 
size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m8_t test_vrem_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vrem_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vrem_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, 
size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vremu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] +// +vuint64m8_t test_vremu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> @@ -10,8 +10,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]] // -vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, - size_t vl) { +vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -29,8 +28,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]] // -vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, - size_t vl) { +vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -48,8 +46,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]] // -vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, - size_t vl) { +vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -139,8 +136,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] // -vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, - size_t vl) { +vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -158,8 +154,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] // -vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, - size_t vl) { +vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -177,8 +172,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] // -vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, - size_t vl) { +vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl) {
return vrgather(op1, index, vl); } @@ -196,8 +190,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, - size_t vl) { +vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -215,8 +208,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, - size_t vl) { +vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -234,8 +226,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, - size_t vl) { +vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -253,8 +244,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, - size_t vl) { +vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -272,8 +262,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, - size_t vl) { +vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -291,8 +280,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, - size_t vl) { +vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -310,8 +298,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, - size_t vl) { +vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -329,8 +316,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, - size_t vl) { +vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -348,8 +334,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, - size_t vl) { +vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -367,8 +352,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, - size_t vl) { +vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -386,8 +370,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, - size_t vl) { +vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -405,8 +388,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, - size_t vl) { +vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -424,8 +406,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, - size_t vl) { +vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -443,8 +424,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, - size_t vl) { +vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -462,8 +442,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, - size_t vl) { +vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -553,8 +532,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, - size_t vl) { +vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -563,8 +541,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, - size_t vl) { +vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -573,8 +550,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, - size_t vl) { +vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -583,8 +559,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, - size_t vl) { +vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -593,8 +568,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, - size_t vl) { +vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -612,8 +586,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, - size_t vl) { +vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -631,8 +604,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, - size_t vl) { +vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -650,8 +622,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, - size_t vl) { +vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -669,8 +640,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, - size_t vl) { +vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -679,8 +649,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, - size_t vl) { +vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -689,8 +658,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, - size_t vl) { +vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -708,8 +676,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, - size_t vl) { +vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -727,8 +694,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, 
vuint32m4_t index, - size_t vl) { +vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -746,8 +712,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, - size_t vl) { +vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -765,8 +730,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, - size_t vl) { +vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -784,8 +748,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, - size_t vl) { +vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -803,8 +766,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, - size_t vl) { +vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -822,8 +784,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, - size_t vl) { +vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -836,13 +797,120 @@ return vrgather(op1, index, vl); } +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl) 
{ + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) { + return vrgather(op1, index, vl); +} + // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, - size_t vl) { +vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -851,8 +919,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, - size_t vl) { +vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -861,8 +928,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, - size_t vl) { +vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -880,8 +946,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, - size_t vl) { +vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -899,8 +964,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, - size_t vl) { +vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -918,8 +982,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, - size_t vl) { +vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -937,8 +1000,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, - size_t vl) { +vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -956,8 +1018,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, - size_t vl) { +vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -975,8 +1036,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, - size_t vl) { +vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -994,8 +1054,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, - size_t vl) { +vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, size_t vl) { return vrgather(op1, index, vl); } @@ -1013,8 +1072,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t 
test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, - size_t vl) { +vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1023,8 +1081,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, - size_t vl) { +vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1033,8 +1090,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, - size_t vl) { +vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1070,8 +1126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1080,8 +1135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1090,8 +1144,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1100,8 +1153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1110,8 +1162,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1120,8 +1171,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1130,8 +1180,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t 
test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1140,8 +1189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1150,8 +1198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1160,8 +1207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1170,8 +1216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1180,8 +1225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1190,8 +1234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1200,8 +1243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, - size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1210,8 +1252,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, - size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1220,8 +1261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t 
test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, - size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1230,8 +1270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, - size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1240,8 +1279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, - size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1250,8 +1288,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, - size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1260,8 +1297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, - size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1270,8 +1306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, - size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1280,8 +1315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1290,8 +1324,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1300,8 +1333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1310,8 +1342,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t 
test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1320,8 +1351,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1330,8 +1360,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1340,8 +1369,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1350,8 +1378,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1360,8 +1387,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1370,8 +1396,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1380,8 +1405,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1390,8 +1414,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1400,8 +1423,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1410,8 +1432,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, - size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1420,8 +1441,61 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, - size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1430,8 +1504,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vfloat32mf2_t 
test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1440,8 +1513,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1450,8 +1522,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, - size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1460,8 +1531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, - size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1470,8 +1540,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, - size_t vl) { +vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1480,8 +1549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1490,8 +1558,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1500,8 +1567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, - size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1510,8 +1576,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, - size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16(op1, op2, vl); } @@ -1520,10 +1585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t 
test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vuint8mf8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( @@ -1531,9 +1594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( @@ -1541,10 +1603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vuint8mf4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( @@ -1552,9 +1612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( @@ -1562,10 +1621,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vuint8mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( @@ -1573,9 +1630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, 
index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( @@ -1583,9 +1639,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( @@ -1593,9 +1648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( @@ -1603,9 +1657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( @@ -1613,9 +1666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( @@ -1623,9 +1675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( @@ -1633,9 +1684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, size_t index, size_t vl) { - 
return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( @@ -1643,9 +1693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( @@ -1653,9 +1702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( @@ -1663,10 +1711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vuint16mf4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( @@ -1674,10 +1720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( @@ -1685,10 +1729,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vuint16mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( @@ -1696,10 +1738,8 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m( @@ -1707,10 +1747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vuint16m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m( @@ -1718,9 +1756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m( @@ -1728,10 +1765,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vuint16m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m( @@ -1739,9 +1774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m( @@ -1749,10 +1783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vuint16m4_t index, - size_t vl) { - return vrgather(mask, 
maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m( @@ -1760,9 +1792,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m( @@ -1770,10 +1801,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vuint16m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m( @@ -1781,9 +1810,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m( @@ -1791,10 +1819,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vuint32mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m( @@ -1802,10 +1828,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m( @@ -1813,10 +1837,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vuint32m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m( @@ -1824,9 +1846,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m( @@ -1834,10 +1855,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vuint32m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m( @@ -1845,9 +1864,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m( @@ -1855,10 +1873,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vuint32m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m( @@ -1866,9 +1882,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, 
vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m( @@ -1876,10 +1891,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vuint32m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m( @@ -1887,9 +1900,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m( @@ -1897,10 +1909,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vuint64m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m( @@ -1908,9 +1918,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m( @@ -1918,10 +1927,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vuint64m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m( @@ -1929,9 +1936,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m( @@ -1939,10 +1945,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vuint64m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m( @@ -1950,9 +1954,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m( @@ -1960,10 +1963,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vuint64m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m( @@ -1971,9 +1972,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m( @@ -1981,10 +1981,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, 
VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m( @@ -1992,9 +1990,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m( @@ -2002,10 +1999,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m( @@ -2013,9 +2008,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m( @@ -2023,10 +2017,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m( @@ -2034,9 +2026,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m( @@ -2044,10 +2035,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m( @@ -2055,9 +2044,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m( @@ -2065,10 +2053,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m( @@ -2076,9 +2062,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m( @@ -2086,10 +2071,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m( @@ -2097,9 +2080,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t 
test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m( @@ -2107,10 +2089,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m( @@ -2118,9 +2098,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m( @@ -2128,10 +2107,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m( @@ -2139,10 +2116,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m( @@ -2150,10 +2125,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m( @@ -2161,10 +2134,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m( @@ -2172,10 +2143,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m( @@ -2183,9 +2152,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m( @@ -2193,10 +2161,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m( @@ -2204,9 +2170,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m( @@ -2214,10 +2179,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t index, - size_t vl) { - return 
vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m( @@ -2225,9 +2188,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m( @@ -2235,10 +2197,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m( @@ -2246,9 +2206,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m( @@ -2256,10 +2215,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m( @@ -2267,10 +2224,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m( @@ 
-2278,10 +2233,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m( @@ -2289,9 +2242,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m( @@ -2299,10 +2251,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m( @@ -2310,9 +2260,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m( @@ -2320,10 +2269,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m( @@ -2331,9 +2278,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - 
vuint32m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m( @@ -2341,10 +2287,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m( @@ -2352,9 +2296,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m( @@ -2362,10 +2305,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m( @@ -2373,9 +2314,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m( @@ -2383,10 +2323,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vx_u64m2_m( @@ -2394,9 +2332,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m( @@ -2404,10 +2341,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m( @@ -2415,9 +2350,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m( @@ -2425,10 +2359,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m( @@ -2436,9 +2368,116 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t 
mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m( @@ -2446,10 +2485,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vuint32mf2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m( @@ -2457,10 +2494,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m( @@ -2468,10 +2503,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vuint32m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, 
VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m( @@ -2479,10 +2512,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m( @@ -2490,10 +2521,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vuint32m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m( @@ -2501,10 +2530,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m( @@ -2512,10 +2539,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vuint32m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m( @@ -2523,10 +2548,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m( @@ -2534,10 +2557,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vuint32m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m( @@ -2545,10 +2566,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m( @@ -2556,10 +2575,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vuint64m1_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( @@ -2567,10 +2584,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( @@ -2578,10 +2593,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vuint64m2_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( @@ -2589,10 +2602,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t 
maskedoff, - vfloat64m2_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m( @@ -2600,10 +2611,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vuint64m4_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m( @@ -2611,10 +2620,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m( @@ -2622,10 +2629,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vuint64m8_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m( @@ -2633,10 +2638,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, size_t index, - size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { + return vrgather(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m( @@ -2644,10 +2647,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vuint16mf4_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { + return 
vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m( @@ -2655,10 +2656,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vuint16mf2_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m( @@ -2666,10 +2665,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vuint16m1_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m( @@ -2677,10 +2674,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vuint16m2_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m( @@ -2688,10 +2683,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vuint16m4_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m( @@ -2699,10 +2692,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vuint16m8_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m( @@ -2710,10 +2701,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m( @@ -2721,10 +2710,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m( @@ -2732,10 +2719,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vuint16m1_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m( @@ -2743,10 +2728,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vuint16m2_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m( @@ -2754,10 +2737,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vuint16m4_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m( @@ -2765,10 +2746,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vuint16m8_t op2, - size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m( @@ -2776,435 +2755,1978 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, 
vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t 
mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrgather_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrgather_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrgather_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrgather_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrgather_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrgather_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrgather_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrgather_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrgather_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrgather_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrgather_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrgather_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrgather_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m8_t test_vrgather_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrgather_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrgather_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrgather_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrgather_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrgather_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrgather_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrgather_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, 
vint16m2_t op1, vuint16m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrgather_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrgather_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrgather_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrgather_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrgather_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, 
op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrgather_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrgather_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrgather_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrgather_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrgather_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrgather_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrgather_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrgather_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrgather_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrgather_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrgather_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrgather_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrgather_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrgather_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrgather_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrgather_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrgather_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrgather_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrgather_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrgather_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrgather_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrgather_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrgather_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrgather_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrgather_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrgather_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrgather_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrgather_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrgather_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, 
vuint8m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrgather_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrgather_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrgather_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrgather_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrgather_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrgather_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrgather_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, 
maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrgather_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrgather_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrgather_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrgather_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrgather_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrgather_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vrgather_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrgather_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrgather_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrgather_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrgather_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrgather_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrgather_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrgather_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrgather_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrgather_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrgather_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrgather_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrgather_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrgather_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrgather_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrgather_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrgather_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vx_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vx_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vx_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vx_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vx_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat16m8_t test_vrgather_vx_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vrgather_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vrgather_vx_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vrgather_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vrgather_vx_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vrgather_vv_f32m4_mt(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vrgather_vx_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vrgather_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vrgather_vx_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vrgather_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vrgather_vx_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vrgather_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vrgather_vx_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, 
size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vrgather_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vrgather_vx_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vrgather_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vrgather_vx_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl, size_t ta) { + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrgatherei16_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrgatherei16_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrgatherei16_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t 
vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrgatherei16_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrgatherei16_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrgatherei16_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrgatherei16_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrgatherei16_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrgatherei16_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrgatherei16_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return 
vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrgatherei16_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrgatherei16_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vuint16m1_t op2, - size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vuint16m2_t op2, - size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint16mf4_t op2, - size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint16mf2_t op2, - size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint16m1_t op2, - size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint16m2_t op2, - size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint16m4_t op2, - size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint16m8_t op2, - size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, - vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, - vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vrgatherei16_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t 
op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, - vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t 
op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint16m1_t op2, - size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint16m2_t op2, - size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f32mf2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgatherei16_vv_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgatherei16_vv_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgatherei16_vv_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgatherei16_vv_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vfloat32mf2_t 
test_vrgatherei16_vv_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, - vfloat32m1_t maskedoff, - vfloat32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, - vfloat32m2_t maskedoff, - vfloat32m2_t op1, vuint16m1_t op2, - size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vuint16m2_t op2, - size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vuint16m4_t op2, - size_t vl) { +vfloat32m8_t test_vrgatherei16_vv_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, - vfloat64m1_t maskedoff, - vfloat64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t 
op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, - vfloat64m2_t maskedoff, - vfloat64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, - vfloat64m4_t maskedoff, - vfloat64m4_t op1, vuint16m1_t op2, - size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vuint16m2_t op2, - size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -406,7 +406,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( @@ -415,7 +415,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( @@ -424,7 +424,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, 
int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( @@ -442,7 +442,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( @@ -451,7 +451,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( @@ -469,7 +469,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( @@ -478,7 +478,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( @@ -487,7 +487,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( @@ -496,7 +496,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( @@ -505,7 +505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( @@ -514,7 +514,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( @@ -532,7 +532,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t 
mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( @@ -541,7 +541,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( @@ -559,7 +559,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( @@ -568,7 +568,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( @@ -577,7 +577,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( @@ -586,7 +586,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( @@ -595,7 +595,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( @@ -604,7 +604,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( @@ -622,7 +622,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( @@ -631,7 +631,7 @@ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( @@ -649,7 +649,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( @@ -658,7 +658,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( @@ -667,7 +667,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( @@ -676,7 +676,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( @@ -685,7 +685,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( @@ -694,7 +694,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( @@ -712,7 +712,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( @@ -721,7 +721,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( @@ -739,7 +739,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( @@ -748,7 +748,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( @@ -757,7 +757,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( @@ -766,7 +766,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( @@ -775,7 +775,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( @@ -784,7 +784,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vrsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( @@ -793,5 +793,402 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrsub_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrsub_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrsub_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrsub_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrsub_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrsub_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrsub_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrsub_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrsub_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrsub_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrsub_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrsub_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrsub_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrsub_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrsub_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrsub_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl,
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrsub_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrsub_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrsub_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrsub_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrsub_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrsub_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrsub_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrsub_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrsub_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrsub_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrsub_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrsub_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrsub_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrsub_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrsub_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + 
return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrsub_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrsub_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrsub_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrsub_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrsub_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrsub_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrsub_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrsub_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrsub_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrsub_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrsub_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c @@ -531,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vsaddu(op1, op2, vl); } @@ -550,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vsaddu(op1, op2, vl); } @@ -641,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t 
op2, - size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vsaddu(op1, op2, vl); } @@ -804,901 +801,1582 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vsadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, 
size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t 
test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vsadd_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vsadd_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vsadd_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vsadd_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t 
maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vsadd_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vsadd_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vsadd_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vsadd_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vsadd_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t 
test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vsadd_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vsadd_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vsadd_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vsadd_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vsadd_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vsadd_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vsadd_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vsadd_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vsadd_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vsadd_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vsadd_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vsadd_vx_i16m2_mt(vbool8_t mask, 
vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vsadd_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vsadd_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vsadd_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vsadd_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vsadd_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vsadd_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vsadd_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vsadd_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vsadd_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vsadd_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vsadd_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vsadd_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vsadd_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vsadd_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vsadd_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vsadd_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, 
vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vsadd_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vsadd_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vsadd_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vsadd_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vsadd_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vsadd_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint8mf8_t test_vsaddu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vsaddu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint8mf4_t test_vsaddu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vsaddu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint8mf2_t test_vsaddu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t 
test_vsaddu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vsaddu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vsaddu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vsaddu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vsaddu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vsaddu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vsaddu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vsaddu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vsaddu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint16mf4_t test_vsaddu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, 
vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vuint16mf2_t test_vsaddu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vsaddu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vsaddu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vsaddu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vsaddu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vsaddu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vsaddu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vsaddu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vsaddu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vuint32mf2_t 
test_vsaddu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint32m1_t test_vsaddu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vsaddu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint32m2_t test_vsaddu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vsaddu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint32m4_t test_vsaddu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vsaddu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vuint32m8_t test_vsaddu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vsaddu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vuint64m1_t test_vsaddu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vsaddu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { 
+vuint64m2_t test_vsaddu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vsaddu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vuint64m4_t test_vsaddu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vsaddu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vuint64m8_t test_vsaddu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf8 (int8_t *base, vint8mf8_t value, size_t vl) { +void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { return vse8(base, value, vl); } @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf4 (int8_t *base, vint8mf4_t value, size_t vl) { +void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { return vse8(base, value, vl); } @@ -33,7 +33,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf2 (int8_t *base, vint8mf2_t value, size_t vl) { +void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { return vse8(base, value, vl); } @@ -43,7 +43,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m1 (int8_t *base, vint8m1_t value, size_t vl) { +void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { return vse8(base, value, vl); } @@ -53,7 +53,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m2 (int8_t *base, vint8m2_t value, size_t vl) { +void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { return vse8(base, value, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m4 (int8_t *base, vint8m4_t value, size_t vl) { +void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { return vse8(base, value, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m8 (int8_t *base, vint8m8_t value, size_t vl) { +void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { return vse8(base, value, vl); } @@ -83,7 +83,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf4 (int16_t *base, vint16mf4_t value, size_t vl) { +void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { return vse16(base, value, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf2 (int16_t *base, vint16mf2_t value, size_t vl) { +void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { return vse16(base, value, vl); } @@ -103,7 +103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m1 (int16_t *base, vint16m1_t value, size_t vl) { +void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { return vse16(base, value, vl); } @@ -113,7 +113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m2 (int16_t *base, vint16m2_t value, size_t vl) { +void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { return vse16(base, value, vl); } @@ -123,7 +123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m4 (int16_t *base, vint16m4_t value, size_t vl) { +void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { return vse16(base, value, vl); } @@ -133,7 +133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m8 (int16_t *base, vint16m8_t value, size_t vl) { +void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { return vse16(base, value, vl); } @@ -143,7 +143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32mf2 (int32_t *base, vint32mf2_t value, size_t vl) { +void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { return vse32(base, value, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m1 (int32_t *base, vint32m1_t value, size_t vl) { +void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { return vse32(base, value, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m2 (int32_t *base, vint32m2_t value, size_t vl) { +void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { return vse32(base, value, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m4 (int32_t *base, vint32m4_t value, size_t vl) { +void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { return vse32(base, value, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m8 (int32_t *base, vint32m8_t value, size_t vl) { +void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { return vse32(base, value, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m1 (int64_t *base, vint64m1_t value, size_t vl) { +void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { return vse64(base, value, vl); } @@ -203,7 +203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m2 (int64_t *base, vint64m2_t value, size_t vl) { +void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { return vse64(base, value, vl); } @@ -213,7 +213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m4 (int64_t *base, vint64m4_t value, size_t vl) { +void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { return vse64(base, value, vl); } @@ -223,7 +223,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m8 (int64_t *base, vint64m8_t value, size_t vl) { +void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { return vse64(base, value, vl); } @@ -233,7 +233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf8 (uint8_t *base, vuint8mf8_t value, size_t vl) { +void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { return vse8(base, value, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf4 (uint8_t *base, vuint8mf4_t value, size_t vl) { +void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { return vse8(base, value, vl); } @@ -253,7 +253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf2 (uint8_t *base, vuint8mf2_t value, size_t vl) { +void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { return vse8(base, value, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m1 (uint8_t *base, vuint8m1_t value, size_t vl) { +void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { return vse8(base, value, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m2 (uint8_t *base, vuint8m2_t value, size_t vl) { +void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { return vse8(base, value, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m4 (uint8_t *base, vuint8m4_t value, size_t vl) { +void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { return vse8(base, value, vl); } @@ -293,7 +293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m8 (uint8_t *base, vuint8m8_t value, size_t vl) { +void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { return vse8(base, value, vl); } @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf4 (uint16_t *base, vuint16mf4_t value, size_t vl) { +void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { return vse16(base, value, vl); } @@ -313,7 +313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf2 (uint16_t *base, vuint16mf2_t value, size_t vl) { +void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { return vse16(base, value, vl); } @@ -323,7 +323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m1 (uint16_t *base, vuint16m1_t value, size_t vl) { +void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { 
return vse16(base, value, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m2 (uint16_t *base, vuint16m2_t value, size_t vl) { +void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { return vse16(base, value, vl); } @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m4 (uint16_t *base, vuint16m4_t value, size_t vl) { +void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { return vse16(base, value, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m8 (uint16_t *base, vuint16m8_t value, size_t vl) { +void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { return vse16(base, value, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32mf2 (uint32_t *base, vuint32mf2_t value, size_t vl) { +void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { return vse32(base, value, vl); } @@ -373,7 +373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m1 (uint32_t *base, vuint32m1_t value, size_t vl) { +void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { return vse32(base, value, vl); } @@ -383,7 +383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m2 (uint32_t *base, vuint32m2_t value, size_t vl) { +void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { return vse32(base, value, vl); } @@ -393,7 +393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m4 (uint32_t *base, vuint32m4_t value, size_t vl) { +void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { return vse32(base, value, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m8 (uint32_t *base, vuint32m8_t value, size_t vl) { +void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { return vse32(base, value, vl); } @@ -413,7 +413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m1 (uint64_t *base, vuint64m1_t value, size_t vl) { +void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { return vse64(base, value, vl); } @@ -423,7 +423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m2 (uint64_t *base, vuint64m2_t value, size_t vl) { +void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { return vse64(base, value, vl); } @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vse64_v_u64m4 (uint64_t *base, vuint64m4_t value, size_t vl) { +void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { return vse64(base, value, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m8 (uint64_t *base, vuint64m8_t value, size_t vl) { +void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { return vse64(base, value, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16mf4 (_Float16 *base, vfloat16mf4_t value, size_t vl) { +void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) { return vse16(base, value, vl); } @@ -463,7 +463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16mf2 (_Float16 *base, vfloat16mf2_t value, size_t vl) { +void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) { return vse16(base, value, vl); } @@ -473,7 +473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m1 (_Float16 *base, vfloat16m1_t value, size_t vl) { +void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) { return vse16(base, value, vl); } @@ -483,7 +483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m2 (_Float16 *base, vfloat16m2_t value, size_t vl) { +void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) { return vse16(base, value, vl); } @@ -493,7 +493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m4 (_Float16 *base, vfloat16m4_t value, size_t vl) { +void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) { return vse16(base, value, vl); } @@ -503,7 +503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m8 (_Float16 *base, vfloat16m8_t value, size_t vl) { +void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) { return vse16(base, value, vl); } @@ -513,7 +513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32mf2 (float *base, vfloat32mf2_t value, size_t vl) { +void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { return vse32(base, value, vl); } @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m1 (float *base, vfloat32m1_t value, size_t vl) { +void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { return vse32(base, value, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m2 (float *base, vfloat32m2_t value, size_t vl) { +void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { return vse32(base, value, vl); } @@ -543,7 +543,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m4 (float *base, vfloat32m4_t value, size_t vl) { +void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { return vse32(base, value, vl); } @@ -553,7 +553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m8 (float *base, vfloat32m8_t value, size_t vl) { +void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { return vse32(base, value, vl); } @@ -563,7 +563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m1 (double *base, vfloat64m1_t value, size_t vl) { +void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { return vse64(base, value, vl); } @@ -573,7 +573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m2 (double *base, vfloat64m2_t value, size_t vl) { +void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { return vse64(base, value, vl); } @@ -583,7 +583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m4 (double *base, vfloat64m4_t value, size_t vl) { +void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { return vse64(base, value, vl); } @@ -593,7 +593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m8 (double *base, vfloat64m8_t value, size_t vl) { +void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { return vse64(base, value, vl); } @@ -603,7 +603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { +void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { +void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -623,7 +623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { +void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { +void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -643,7 +643,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { +void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -653,7 +653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m4_m (vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { +void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -663,7 +663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m8_m (vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { +void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -673,7 +673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { +void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -683,7 +683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { +void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -693,7 +693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { +void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { +void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -713,7 +713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m4_m (vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { +void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m8_m (vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { +void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -733,7 +733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { +void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -743,7 +743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { +void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -753,7 +753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { +void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -763,7 +763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m4_m (vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { +void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -773,7 +773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m8_m (vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { +void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { +void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -793,7 +793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { +void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -803,7 +803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m4_m (vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { +void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m8_m (vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { +void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -823,7 +823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { +void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -833,7 +833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { +void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -843,7 +843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { +void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -853,7 +853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { +void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -863,7 +863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { +void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { +void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { +void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { return vse8(mask, base, value, vl); } @@ -893,7 +893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { +void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { +void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -913,7 +913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vse16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { +void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -923,7 +923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { +void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -933,7 +933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { +void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -943,7 +943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { +void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -953,7 +953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { +void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { +void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { +void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -983,7 +983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { +void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) { +void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -1003,7 +1003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vse64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) { +void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1013,7 +1013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) { +void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1023,7 +1023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) { +void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1033,7 +1033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) { +void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1043,7 +1043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) { +void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) { +void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) { +void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -1073,7 +1073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) { +void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) { +void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -1093,7 +1093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vse16_v_f16m8_m (vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) { +void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) { return vse16(mask, base, value, vl); } @@ -1103,7 +1103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) { +void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) { +void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -1123,7 +1123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) { +void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -1133,7 +1133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m4_m (vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) { +void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m8_m (vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) { +void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) { return vse32(mask, base, value, vl); } @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) { +void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1163,7 +1163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) { +void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) { +void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) { return vse64(mask, base, value, vl); } @@ -1183,7 +1183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m8_m (vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) { +void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) { return vse64(mask, base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -221,6 +221,60 @@ return vset(dest, 0, val); } +// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t val) { + return vset(dest, 0, val); +} + // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) @@ -544,3 +598,4 @@ vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) { 
return vset(dest, 0, val); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c @@ -261,9 +261,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m( @@ -271,9 +270,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m( @@ -281,9 +279,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m( @@ -291,9 +288,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( @@ -301,9 +297,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( @@ -311,9 +306,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint16m8_t 
test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( @@ -321,9 +315,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint8mf8_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( @@ -331,9 +324,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint8mf4_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( @@ -341,9 +333,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint8mf2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( @@ -351,9 +342,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint8m1_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( @@ -361,9 +351,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint8m2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( @@ -371,9 +360,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint8mf8_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return vsext_vf8(mask, 
maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( @@ -381,9 +369,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint8mf4_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return vsext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( @@ -391,9 +378,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint8mf2_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return vsext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( @@ -401,9 +387,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint8m1_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return vsext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( @@ -411,9 +396,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( @@ -421,9 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( @@ -431,9 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( @@ -441,9 +423,8 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( @@ -451,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( @@ -461,9 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint16mf4_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( @@ -471,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint16mf2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( @@ -481,9 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint16m1_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( @@ -491,9 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint16m2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return vsext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( @@ -501,9 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( @@ -511,9 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( @@ -521,9 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m( @@ -531,7 +504,259 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, size_t vl) { +vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { + return vsext_vf2(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsext_vf2_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsext_vf2_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsext_vf2_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsext_vf2_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsext_vf2_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsext_vf2_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl, size_t ta) { return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl, size_t ta) { + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl, size_t ta) { + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl, size_t ta) { + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl, size_t ta) { + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf2_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf2_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf2_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf2_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf2_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl, size_t ta) { + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf2_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf2_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); 
+} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf2_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl, size_t ta) { + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c @@ -72,8 +72,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, - size_t vl) { +vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -82,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, - size_t vl) { +vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -128,8 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, - size_t vl) { +vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -210,8 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, - size_t vl) { +vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -220,8 +216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, - size_t vl) { +vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -230,8 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, - size_t vl) { +vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -276,8 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, - size_t vl) { +vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -286,8 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, - size_t vl) { +vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -296,8 +288,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, - size_t vl) { +vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -306,8 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, - size_t vl) { +vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -316,8 +306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, - size_t vl) { +vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -326,8 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, - size_t vl) { +vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -336,8 +324,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, - size_t vl) { +vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -346,8 +333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, - size_t vl) { +vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -356,8 +342,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, - size_t vl) { +vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -366,8 +351,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, - size_t vl) { +vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -376,8 +360,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, - size_t vl) { +vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -386,8 +369,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, - size_t vl) { +vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -396,8 +378,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, - size_t vl) { +vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -406,8 +387,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, - size_t vl) { +vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -416,8 +396,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, - size_t vl) { +vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { return vslide1down(src, value, vl); } @@ -426,10 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t src, int8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m( @@ -437,10 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t src, int8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m( @@ -448,10 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - 
vint8mf2_t src, int8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( @@ -459,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( @@ -469,9 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( @@ -479,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( @@ -489,9 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( @@ -499,10 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t src, int16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1down_vx_i16mf2_m( @@ -510,10 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t src, int16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( @@ -521,10 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t src, int16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( @@ -532,10 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t src, int16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m( @@ -543,10 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t src, int16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( @@ -554,10 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t src, int16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( @@ -565,10 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t src, int32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( @@ -576,10 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t src, int32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( @@ -587,10 +540,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t src, int32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( @@ -598,10 +549,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t src, int32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( @@ -609,10 +558,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t src, int32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( @@ -620,10 +567,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t src, int64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m1_t 
test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( @@ -631,10 +576,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t src, int64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( @@ -642,10 +585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t src, int64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( @@ -653,10 +594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t src, int64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( @@ -664,10 +603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( @@ -675,10 +612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( @@ -686,10 +621,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( @@ -697,10 +630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( @@ -708,10 +639,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( @@ -719,10 +648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( @@ -730,10 +657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t src, uint8_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( @@ -741,11 +666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, - 
vuint16mf4_t maskedoff, - vuint16mf4_t src, uint16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( @@ -753,11 +675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, - vuint16mf2_t maskedoff, - vuint16mf2_t src, uint16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( @@ -765,10 +684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t src, uint16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( @@ -776,10 +693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t src, uint16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m( @@ -787,10 +702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t src, uint16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( @@ -798,10 +711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t src, uint16_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t 
mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( @@ -809,11 +720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, - vuint32mf2_t maskedoff, - vuint32mf2_t src, uint32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( @@ -821,10 +729,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t src, uint32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( @@ -832,10 +738,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t src, uint32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( @@ -843,10 +747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t src, uint32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( @@ -854,10 +756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t src, uint32_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( @@ -865,10 +765,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t src, uint64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( @@ -876,10 +774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t src, uint64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( @@ -887,10 +783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t src, uint64_t value, - size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m( @@ -898,8 +792,403 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t src, uint64_t value, - size_t vl) { +vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { + return vslide1down(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslide1down_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslide1down_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vslide1down_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslide1down_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslide1down_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslide1down_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslide1down_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslide1down_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslide1down_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslide1down_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslide1down_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslide1down_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslide1down_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslide1down_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslide1down_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslide1down_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_mt( +//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslide1down_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslide1down_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslide1down_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslide1down_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslide1down_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslide1down_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslide1down_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslide1down_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslide1down_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslide1down_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslide1down_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslide1down_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslide1down_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslide1down_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslide1down_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslide1down_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslide1down_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslide1down_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslide1down_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslide1down_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslide1down_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslide1down_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslide1down_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslide1down_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslide1down_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslide1down_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslide1down_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -72,8 +72,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, - size_t vl) { +vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -82,8 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, - size_t vl) { +vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -128,8 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, - size_t vl) { +vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -273,8 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, - size_t vl) { +vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -283,8 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, - size_t vl) { +vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -293,8 +288,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, - size_t vl) { +vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -303,8 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, - size_t vl) { +vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -313,8 +306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, - size_t vl) { +vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -323,8 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, - size_t vl) { +vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -333,8 +324,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, - size_t vl) { +vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -343,8 +333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, - size_t vl) { +vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -353,8 +342,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, - size_t vl) { +vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -363,8 +351,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, - size_t vl) { +vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -373,8 +360,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, - size_t vl) { +vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -383,8 +369,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, - size_t vl) { +vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -393,8 +378,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, - size_t vl) { +vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -403,8 +387,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, - size_t vl) { +vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -413,8 +396,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, 
uint64_t value, - size_t vl) { +vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { return vslide1up(src, value, vl); } @@ -423,9 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m( @@ -433,9 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m( @@ -443,9 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m( @@ -453,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( @@ -463,9 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( @@ -473,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( @@ -483,9 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( @@ -493,10 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t src, int16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( @@ -504,10 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t src, int16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( @@ -515,9 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( @@ -525,9 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { + return 
vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( @@ -535,9 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( @@ -545,9 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( @@ -555,10 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t src, int32_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( @@ -566,9 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( @@ -576,9 +540,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( @@ -586,9 +549,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( @@ -596,9 +558,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( @@ -606,9 +567,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( @@ -616,9 +576,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( @@ -626,9 +585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( @@ -636,9 +594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { + return 
vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( @@ -646,10 +603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t src, uint8_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( @@ -657,10 +612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t src, uint8_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( @@ -668,10 +621,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t src, uint8_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( @@ -679,9 +630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( @@ -689,9 +639,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( @@ -699,9 +648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( @@ -709,9 +657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m( @@ -719,10 +666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t src, uint16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m( @@ -730,10 +675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t src, uint16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m( @@ -741,10 +684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t src, uint16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m( @@ -752,10 +693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t src, uint16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t 
src, uint16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m( @@ -763,10 +702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t src, uint16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m( @@ -774,10 +711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t src, uint16_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m( @@ -785,10 +720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t src, uint32_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m( @@ -796,10 +729,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t src, uint32_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m( @@ -807,10 +738,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t src, uint32_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m( @@ -818,10 +747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t src, uint32_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m( @@ -829,10 +756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t src, uint32_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m( @@ -840,10 +765,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t src, uint64_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m( @@ -851,10 +774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t src, uint64_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m( @@ -862,10 +783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t src, uint64_t value, - size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m( @@ -873,8 +792,403 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t src, uint64_t value, - size_t vl) { +vuint64m8_t 
test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { + return vslide1up(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslide1up_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslide1up_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslide1up_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslide1up_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslide1up_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslide1up_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslide1up_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl, size_t ta) { + return vslide1up(mask, 
maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslide1up_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslide1up_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslide1up_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslide1up_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslide1up_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslide1up_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslide1up_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslide1up_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslide1up_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslide1up_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslide1up_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslide1up_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslide1up_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_mt( +// CHECK-RV64-NEXT:
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslide1up_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslide1up_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslide1up_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslide1up_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslide1up_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslide1up_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslide1up_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslide1up_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslide1up_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslide1up_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslide1up_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslide1up_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslide1up_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslide1up_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslide1up_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslide1up_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslide1up_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslide1up_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslide1up_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslide1up_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslide1up_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslide1up_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl, size_t ta) { + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -802,7 +802,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1_m(vbool8_t 
mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( @@ -964,7 +964,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + 
return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, 
vint64m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( 
@@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, 
vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + 
return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsll(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff,
vint8m4_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_mt( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vsll(mask, 
maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vsll_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vsll_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vsll_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -405,9 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( @@ -415,9 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( @@ -425,9 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t 
vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( @@ -435,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( @@ -445,9 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( @@ -455,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( @@ -465,9 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( @@ -475,9 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( @@ -485,9 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t 
test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( @@ -495,9 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( @@ -505,9 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( @@ -515,9 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( @@ -525,9 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( @@ -535,9 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( @@ -545,10 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( @@ -556,9 +540,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( @@ -566,10 +549,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( @@ -577,9 +558,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( @@ -587,9 +567,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( @@ -597,9 +576,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( @@ -607,9 
+585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( @@ -617,9 +594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( @@ -627,9 +603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( @@ -637,9 +612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( @@ -647,9 +621,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( @@ -657,9 +630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, 
int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( @@ -667,10 +639,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( @@ -678,9 +648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( @@ -688,9 +657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( @@ -698,9 +666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( @@ -708,9 +675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( @@ -718,9 +684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { 
- return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( @@ -728,9 +693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( @@ -738,9 +702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( @@ -748,9 +711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( @@ -758,9 +720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( @@ -768,9 +729,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( @@ -778,9 +738,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( @@ -788,9 +747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( @@ -798,9 +756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( @@ -808,9 +765,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( @@ -818,9 +774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( @@ -828,9 +783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( @@ -838,7 +792,403 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, 
op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
int64_t op2, size_t vl, size_t ta) { + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -33,7 +33,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -43,7 +43,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -53,7 +53,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { +void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { +void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m8 (int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { +void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -83,7 +83,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t 
vl) { return vsoxei16(base, bindex, value, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -103,7 +103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -113,7 +113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -123,7 +123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { +void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -133,7 +133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { +void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -143,7 +143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m1 (int8_t *base, 
vuint32m4_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { +void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -203,7 +203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -213,7 +213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -223,7 +223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -233,7 +233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -253,7 +253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { +void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m8 (int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { +void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -293,7 +293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -313,7 +313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -323,7 +323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { +void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m8 (int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { +void 
test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -373,7 +373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -383,7 +383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -393,7 +393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { +void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -413,7 +413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -423,7 +423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -463,7 +463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -473,7 +473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -483,7 +483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -493,7 +493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -503,7 +503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -513,7 +513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, 
vint32m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -553,7 +553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -563,7 +563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -573,7 +573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -583,7 +583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -593,7 +593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -603,7 +603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -623,7 +623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -643,7 +643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -653,7 +653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -663,7 +663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -673,7 +673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -683,7 +683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vsoxei16(base, bindex, value, 
vl); } @@ -693,7 +693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -713,7 +713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -733,7 +733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -743,7 +743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -753,7 +753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -763,7 +763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -773,7 +773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m4 (int64_t *base, 
vuint64m4_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -793,7 +793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -803,7 +803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -823,7 +823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -833,7 +833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { +void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -843,7 +843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { +void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -853,7 +853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m8 (uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { +void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -863,7 +863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -893,7 +893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { +void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -913,7 +913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { +void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -923,7 +923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -933,7 +933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -943,7 +943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei32_v_u8mf2(uint8_t *base, 
vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -953,7 +953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { +void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -983,7 +983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1003,7 +1003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1013,7 +1013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1023,7 +1023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1033,7 +1033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1043,7 +1043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { +void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m8 (uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { +void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1073,7 +1073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1093,7 +1093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1103,7 +1103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { +void test_vsoxei16_v_u16m4(uint16_t *base, 
vuint16m4_t bindex, vuint16m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1123,7 +1123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m8 (uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { +void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1133,7 +1133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1163,7 +1163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { +void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1183,7 +1183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1193,7 +1193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1203,7 +1203,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1213,7 +1213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1223,7 +1223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1233,7 +1233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1253,7 +1253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1263,7 +1263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1273,7 +1273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1283,7 +1283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, 
vuint32m1_t value, size_t vl) { +void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1293,7 +1293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1313,7 +1313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1323,7 +1323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1343,7 +1343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1353,7 +1353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1363,7 +1363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1373,7 
+1373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1383,7 +1383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1393,7 +1393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1403,7 +1403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1413,7 +1413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1433,7 +1433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1443,7 +1443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1453,7 +1453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m1 
(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1463,7 +1463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1473,7 +1473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1483,7 +1483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1493,7 +1493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1503,7 +1503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1523,7 +1523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vsoxei64(base, 
bindex, value, vl); } @@ -1543,7 +1543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1553,7 +1553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1563,7 +1563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1573,7 +1573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1583,7 +1583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1593,7 +1593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1603,7 +1603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1613,7 +1613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1623,7 +1623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsoxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1633,7 +1633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1643,7 +1643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1653,7 +1653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1663,7 +1663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1673,7 +1673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1683,7 +1683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1693,7 +1693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1703,7 +1703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei32_v_f16mf2(_Float16 *base, 
vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1713,7 +1713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1723,7 +1723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1733,7 +1733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1743,7 +1743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1753,7 +1753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1763,7 +1763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1773,7 +1773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1783,7 +1783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1793,7 +1793,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1803,7 +1803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1813,7 +1813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1823,7 +1823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1833,7 +1833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1843,7 +1843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1853,7 +1853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1863,7 +1863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1873,7 +1873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { +void 
test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -1883,7 +1883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1893,7 +1893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1903,7 +1903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1913,7 +1913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1923,7 +1923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -1933,7 +1933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1943,7 +1943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1953,7 +1953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1963,7 +1963,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -1973,7 +1973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1983,7 +1983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -1993,7 +1993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -2003,7 +2003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } @@ -2013,7 +2013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -2023,7 +2023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -2033,7 +2033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -2043,7 +2043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { 
+void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } @@ -2053,7 +2053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -2063,7 +2063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -2073,7 +2073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -2083,7 +2083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } @@ -2093,7 +2093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -2103,7 +2103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -2113,7 +2113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -2123,7 +2123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } @@ -2133,7 +2133,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2143,7 +2143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2153,7 +2153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2163,7 +2163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2173,7 +2173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { +void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2183,7 +2183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { +void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2193,7 +2193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m8_m (vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { +void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2203,7 +2203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { +void 
test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2213,7 +2213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2223,7 +2223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2233,7 +2233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2243,7 +2243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { +void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2253,7 +2253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { +void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2263,7 +2263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2273,7 +2273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2283,7 +2283,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2293,7 +2293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2303,7 +2303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { +void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2313,7 +2313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { +void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2323,7 +2323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2333,7 +2333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { +void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2343,7 +2343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { +void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2353,7 +2353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, 
size_t vl) { +void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2363,7 +2363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2373,7 +2373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2383,7 +2383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2393,7 +2393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { +void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2403,7 +2403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m8_m (vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { +void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2413,7 +2413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2423,7 +2423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2433,7 +2433,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2443,7 +2443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2453,7 +2453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { +void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2463,7 +2463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m8_m (vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { +void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2473,7 +2473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2483,7 +2483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2493,7 +2493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2503,7 +2503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m2_m (vbool8_t mask, int16_t 
*base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2513,7 +2513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { +void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2523,7 +2523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { +void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2533,7 +2533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2543,7 +2543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { +void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2553,7 +2553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { +void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2563,7 +2563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2573,7 +2573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, 
vl); } @@ -2583,7 +2583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2593,7 +2593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2603,7 +2603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2613,7 +2613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2623,7 +2623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2633,7 +2633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2643,7 +2643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2653,7 +2653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2663,7 +2663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2673,7 +2673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2683,7 +2683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2693,7 +2693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2703,7 +2703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2713,7 +2713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2723,7 +2723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, 
size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2733,7 +2733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2743,7 +2743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2753,7 +2753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2763,7 +2763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2773,7 +2773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2783,7 +2783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2793,7 +2793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2803,7 +2803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2813,7 +2813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2823,7 +2823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2833,7 +2833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2843,7 +2843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2853,7 +2853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2863,7 +2863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -2873,7 +2873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t 
bindex, vint64m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2883,7 +2883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2893,7 +2893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2903,7 +2903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -2913,7 +2913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2923,7 +2923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2933,7 +2933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2943,7 +2943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2953,7 +2953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { +void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2963,7 +2963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { +void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2973,7 +2973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { +void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -2983,7 +2983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -2993,7 +2993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3003,7 +3003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3013,7 +3013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3023,7 +3023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { +void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, 
vuint16m4_t bindex, vuint8m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3033,7 +3033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { +void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3043,7 +3043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3053,7 +3053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3063,7 +3063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3073,7 +3073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3083,7 +3083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { +void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3093,7 +3093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3103,7 +3103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3113,7 +3113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3123,7 +3123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { +void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3133,7 +3133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3143,7 +3143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3153,7 +3153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3163,7 +3163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3173,7 +3173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { +void 
test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3183,7 +3183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { +void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3193,7 +3193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3203,7 +3203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3213,7 +3213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3223,7 +3223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3233,7 +3233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { +void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3243,7 +3243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { +void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3253,7 +3253,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3263,7 +3263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3273,7 +3273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3283,7 +3283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3293,7 +3293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { +void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3303,7 +3303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3313,7 +3313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3323,7 +3323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsoxei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { +void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3333,7 +3333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { +void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3343,7 +3343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3353,7 +3353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3363,7 +3363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3373,7 +3373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3383,7 +3383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3393,7 +3393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, 
vuint32mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3403,7 +3403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3413,7 +3413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3423,7 +3423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3433,7 +3433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3443,7 +3443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3453,7 +3453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3463,7 +3463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3473,7 +3473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3483,7 +3483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3493,7 +3493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3503,7 +3503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3513,7 +3513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3523,7 +3523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3533,7 +3533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3543,7 +3543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, 
vuint64m2_t value, size_t vl) { +void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3553,7 +3553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3563,7 +3563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3573,7 +3573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3583,7 +3583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3593,7 +3593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3603,7 +3603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3613,7 +3613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } 
@@ -3623,7 +3623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3633,7 +3633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3643,7 +3643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3653,7 +3653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3663,7 +3663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3673,7 +3673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3683,7 +3683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3693,7 +3693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3703,7 +3703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3713,7 +3713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3723,7 +3723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3733,7 +3733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3743,7 +3743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3753,7 +3753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3763,7 +3763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 
*base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3773,7 +3773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3783,7 +3783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3793,7 +3793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3803,7 +3803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3813,7 +3813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3823,7 +3823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3833,7 +3833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3843,7 +3843,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3853,7 +3853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -3863,7 +3863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3873,7 +3873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3883,7 +3883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3893,7 +3893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -3903,7 +3903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3913,7 +3913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3923,7 +3923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3933,7 +3933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3943,7 +3943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -3953,7 +3953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3963,7 +3963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3973,7 +3973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -3983,7 +3983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return 
vsoxei16(mask, base, bindex, value, vl); } @@ -3993,7 +3993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -4003,7 +4003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4013,7 +4013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4023,7 +4023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4033,7 +4033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4043,7 +4043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4053,7 +4053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4063,7 +4063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4073,7 +4073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4083,7 +4083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4093,7 +4093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -4103,7 +4103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -4113,7 +4113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -4123,7 +4123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } @@ -4133,7 +4133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, 
vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -4143,7 +4143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -4153,7 +4153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -4163,7 +4163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } @@ -4173,7 +4173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4183,7 +4183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4193,7 +4193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4203,7 +4203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } @@ -4213,7 +4213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4223,7 +4223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4233,7 +4233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } @@ -4243,7 +4243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +experimental-v \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ // RUN: -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -30,7 +30,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -39,7 +39,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -48,7 +48,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -57,7 +57,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -75,7 +75,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t 
bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -84,7 +84,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -102,7 +102,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -111,7 +111,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) 
{ +void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -147,7 +147,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t 
v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -201,7 +201,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -219,7 +219,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -237,7 +237,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { 
return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -246,7 +246,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -264,7 +264,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -282,7 +282,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -291,7 +291,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -318,7 +318,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -327,7 +327,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -336,7 +336,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -354,7 +354,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -372,7 +372,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -507,7 +507,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -588,7 +588,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -597,7 +597,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -606,7 +606,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -615,7 +615,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -624,7 +624,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t 
v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -642,7 +642,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -651,7 +651,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -669,7 +669,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -678,7 +678,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void 
test_vsoxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -687,7 +687,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -696,7 +696,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -705,7 +705,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -714,7 +714,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -732,7 +732,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf2 (int8_t *base, vuint32m2_t 
bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -741,7 +741,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -759,7 +759,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -768,7 +768,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -777,7 +777,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -786,7 +786,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -795,7 +795,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -804,7 +804,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -822,7 +822,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -831,7 +831,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ 
-840,7 +840,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -849,7 +849,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -858,7 +858,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -867,7 +867,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -876,7 +876,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -885,7 +885,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -894,7 +894,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t 
v3, vint8mf8_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -912,7 +912,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -921,7 +921,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -939,7 +939,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -948,7 +948,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i8mf4(int8_t 
*base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -957,7 +957,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -966,7 +966,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -975,7 +975,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -984,7 +984,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -1002,7 +1002,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf2 
(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1011,7 +1011,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1029,7 +1029,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1038,7 +1038,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1047,7 +1047,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1056,7 +1056,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -1065,7 +1065,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1074,7 +1074,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1092,7 +1092,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1101,7 +1101,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1119,7 +1119,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -1128,7 +1128,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1137,7 +1137,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1146,7 +1146,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1155,7 +1155,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1164,7 +1164,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1182,7 +1182,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -1191,7 +1191,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1209,7 +1209,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1218,7 +1218,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1227,7 +1227,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1236,7 +1236,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1245,7 +1245,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -1254,7 +1254,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1263,7 +1263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, 
vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1272,7 +1272,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1281,7 +1281,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1299,7 +1299,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1308,7 +1308,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -1317,7 +1317,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, 
vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1326,7 +1326,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1335,7 +1335,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -1344,7 +1344,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -1353,7 +1353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1362,7 +1362,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1371,7 +1371,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1389,7 +1389,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1398,7 +1398,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1407,7 +1407,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -1416,7 +1416,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1425,7 +1425,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, 
vint16mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1434,7 +1434,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1443,7 +1443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1452,7 +1452,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1461,7 +1461,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1470,7 +1470,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -1479,7 +1479,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg3ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1488,7 +1488,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1497,7 +1497,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1506,7 +1506,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1515,7 +1515,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1524,7 +1524,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, 
v3, v4, v5, v6, v7, vl); } @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -1542,7 +1542,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1551,7 +1551,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1560,7 +1560,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -1569,7 +1569,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -1578,7 +1578,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1587,7 +1587,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, 
vint16mf4_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1596,7 +1596,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1605,7 +1605,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1614,7 +1614,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1623,7 +1623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1632,7 +1632,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -1641,7 +1641,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsoxseg3ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1650,7 +1650,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1659,7 +1659,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1668,7 +1668,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1677,7 +1677,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1686,7 +1686,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t 
v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1695,7 +1695,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -1704,7 +1704,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1713,7 +1713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1722,7 +1722,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1731,7 +1731,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1740,7 +1740,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { 
return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1749,7 +1749,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1758,7 +1758,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -1767,7 +1767,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1776,7 +1776,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1785,7 +1785,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -1794,7 +1794,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -1803,7 +1803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg3ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1812,7 +1812,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1821,7 +1821,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1830,7 +1830,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1839,7 +1839,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1848,7 +1848,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, 
vint16mf4_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1857,7 +1857,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -1866,7 +1866,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1875,7 +1875,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1884,7 +1884,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1893,7 +1893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1902,7 +1902,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, 
vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1911,7 +1911,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1920,7 +1920,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -1929,7 +1929,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1938,7 +1938,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1947,7 +1947,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1956,7 +1956,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t 
v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1965,7 +1965,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1974,7 +1974,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1983,7 +1983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -1992,7 +1992,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2001,7 +2001,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2010,7 +2010,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, 
vint32mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2019,7 +2019,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2028,7 +2028,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2037,7 +2037,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2046,7 +2046,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2055,7 +2055,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2064,7 +2064,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, 
vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2073,7 +2073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2082,7 +2082,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2091,7 +2091,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2100,7 +2100,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2109,7 +2109,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2118,7 +2118,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, 
vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2127,7 +2127,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2136,7 +2136,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2145,7 +2145,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2154,7 +2154,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2163,7 +2163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2172,7 +2172,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2181,7 +2181,7 
@@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2190,7 +2190,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2199,7 +2199,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2208,7 +2208,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2217,7 +2217,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2226,7 +2226,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, 
vint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2235,7 +2235,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2244,7 +2244,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2253,7 +2253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2262,7 +2262,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2271,7 +2271,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2280,7 +2280,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t 
v5, vint32m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2289,7 +2289,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2298,7 +2298,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2307,7 +2307,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2316,7 +2316,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2325,7 +2325,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2334,7 +2334,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2343,7 
+2343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2352,7 +2352,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2361,7 +2361,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2370,7 +2370,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2379,7 +2379,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2388,7 +2388,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t 
v6, vint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2397,7 +2397,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2406,7 +2406,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2415,7 +2415,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2424,7 +2424,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2433,7 +2433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2442,7 +2442,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, 
vint32m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2451,7 +2451,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2460,7 +2460,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2469,7 +2469,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2478,7 +2478,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2487,7 +2487,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2496,7 +2496,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -2505,7 +2505,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2514,7 +2514,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2523,7 +2523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2532,7 +2532,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2541,7 +2541,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2550,7 +2550,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, 
size_t vl) { +void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2559,7 +2559,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -2568,7 +2568,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2577,7 +2577,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2586,7 +2586,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2595,7 +2595,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2604,7 +2604,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) 
{ +void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2613,7 +2613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2622,7 +2622,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -2631,7 +2631,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2640,7 +2640,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2649,7 +2649,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -2658,7 +2658,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2667,7 +2667,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2676,7 +2676,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2685,7 +2685,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2694,7 +2694,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2703,7 +2703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2712,7 +2712,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t 
v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2721,7 +2721,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2730,7 +2730,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2739,7 +2739,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2748,7 +2748,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -2757,7 +2757,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2766,7 +2766,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2775,7 +2775,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void 
test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2784,7 +2784,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2793,7 +2793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2802,7 +2802,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2811,7 +2811,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2820,7 +2820,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2829,7 +2829,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2838,7 +2838,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2847,7 +2847,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -2856,7 +2856,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2865,7 +2865,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2874,7 +2874,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2883,7 +2883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } 
@@ -2892,7 +2892,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2901,7 +2901,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2910,7 +2910,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2919,7 +2919,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2928,7 +2928,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2937,7 +2937,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, 
vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2946,7 +2946,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -2955,7 +2955,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -2964,7 +2964,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2973,7 +2973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2982,7 +2982,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2991,7 +2991,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3000,7 +3000,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3009,7 +3009,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3018,7 +3018,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -3027,7 +3027,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -3036,7 +3036,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -3045,7 +3045,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -3054,7 +3054,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf8 (uint8_t 
*base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -3063,7 +3063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3072,7 +3072,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3081,7 +3081,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3090,7 +3090,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3099,7 +3099,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3108,7 +3108,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg8ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3117,7 +3117,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -3126,7 +3126,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3135,7 +3135,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3144,7 +3144,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3153,7 +3153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3162,7 +3162,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3171,7 +3171,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3180,7 +3180,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -3189,7 +3189,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3198,7 +3198,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3207,7 +3207,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3216,7 +3216,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3225,7 +3225,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3234,7 +3234,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3243,7 +3243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -3252,7 +3252,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3261,7 +3261,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3270,7 +3270,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3279,7 +3279,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3288,7 +3288,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3297,7 +3297,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3306,7 +3306,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -3315,7 +3315,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return 
vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3324,7 +3324,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3333,7 +3333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -3342,7 +3342,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -3351,7 +3351,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3360,7 +3360,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3369,7 +3369,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3378,7 +3378,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf8 (uint8_t *base, 
vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3387,7 +3387,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3396,7 +3396,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3405,7 +3405,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -3414,7 +3414,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3423,7 +3423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3432,7 +3432,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3441,7 +3441,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3450,7 +3450,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3459,7 +3459,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3468,7 +3468,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -3477,7 +3477,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, 
vuint8mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3486,7 +3486,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3495,7 +3495,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3504,7 +3504,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3513,7 +3513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3522,7 +3522,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3531,7 +3531,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -3540,7 +3540,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3549,7 +3549,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3558,7 +3558,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3567,7 +3567,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3576,7 +3576,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3585,7 +3585,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3594,7 +3594,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -3603,7 +3603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3612,7 +3612,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3621,7 +3621,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -3630,7 +3630,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -3639,7 +3639,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, 
vl); } @@ -3648,7 +3648,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3657,7 +3657,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3666,7 +3666,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3675,7 +3675,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3684,7 +3684,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3693,7 +3693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -3702,7 +3702,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3711,7 +3711,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3720,7 +3720,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3729,7 +3729,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3738,7 +3738,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3747,7 +3747,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3756,7 +3756,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -3765,7 +3765,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3774,7 +3774,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3783,7 +3783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3792,7 +3792,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3801,7 +3801,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3810,7 +3810,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3819,7 +3819,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -3828,7 +3828,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3837,7 +3837,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3846,7 +3846,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3855,7 +3855,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3864,7 +3864,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3873,7 +3873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3882,7 +3882,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -3891,7 +3891,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3900,7 +3900,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsoxseg4ei32(base, 
bindex, v0, v1, v2, v3, vl); } @@ -3909,7 +3909,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -3918,7 +3918,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -3927,7 +3927,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -3936,7 +3936,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3945,7 +3945,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3954,7 +3954,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3963,7 +3963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3972,7 +3972,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -3981,7 +3981,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -3990,7 +3990,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -3999,7 +3999,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4008,7 +4008,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, 
vuint8mf4_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4017,7 +4017,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4026,7 +4026,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4035,7 +4035,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -4044,7 +4044,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4053,7 +4053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4062,7 +4062,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void 
test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4071,7 +4071,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4080,7 +4080,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4089,7 +4089,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4098,7 +4098,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -4107,7 +4107,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4116,7 +4116,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsoxseg4ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4125,7 +4125,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4134,7 +4134,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4143,7 +4143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4152,7 +4152,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4161,7 +4161,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -4170,7 +4170,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4179,7 +4179,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4188,7 +4188,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4197,7 +4197,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4206,7 +4206,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4215,7 +4215,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, 
vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4224,7 +4224,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -4233,7 +4233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4242,7 +4242,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4251,7 +4251,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4260,7 +4260,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4269,7 +4269,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf2 (uint16_t 
*base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4278,7 +4278,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4287,7 +4287,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -4296,7 +4296,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4305,7 +4305,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4314,7 +4314,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4323,7 +4323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4332,7 +4332,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4341,7 +4341,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4350,7 +4350,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -4359,7 +4359,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4368,7 +4368,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4377,7 +4377,7 @@ 
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -4386,7 +4386,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -4395,7 +4395,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4404,7 +4404,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4413,7 +4413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4422,7 +4422,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4431,7 +4431,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4440,7 +4440,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4449,7 +4449,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -4458,7 +4458,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4467,7 +4467,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4476,7 +4476,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, 
v0, v1, v2, v3, v4, vl); } @@ -4485,7 +4485,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4494,7 +4494,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4503,7 +4503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4512,7 +4512,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -4521,7 +4521,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4530,7 +4530,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, 
vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4539,7 +4539,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4548,7 +4548,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4557,7 +4557,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4566,7 +4566,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4575,7 +4575,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -4584,7 +4584,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4593,7 +4593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4602,7 +4602,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -4611,7 +4611,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -4620,7 +4620,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4629,7 +4629,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4638,7 +4638,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void 
test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4647,7 +4647,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4656,7 +4656,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4665,7 +4665,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4674,7 +4674,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -4683,7 +4683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4692,7 +4692,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4701,7 +4701,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4710,7 +4710,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4719,7 +4719,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4728,7 +4728,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4737,7 +4737,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1 (uint16_t 
*base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -4746,7 +4746,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4755,7 +4755,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4764,7 +4764,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4773,7 +4773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4782,7 +4782,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4791,7 +4791,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4800,7 +4800,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -4809,7 +4809,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4818,7 +4818,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4827,7 +4827,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -4836,7 +4836,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -4845,7 +4845,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, 
vuint16mf4_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4854,7 +4854,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4863,7 +4863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4872,7 +4872,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4881,7 +4881,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4890,7 +4890,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4899,7 +4899,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -4908,7 +4908,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4917,7 +4917,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4926,7 +4926,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4935,7 +4935,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4944,7 +4944,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei64(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4953,7 +4953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4962,7 +4962,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -4971,7 +4971,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4980,7 +4980,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4989,7 +4989,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4998,7 +4998,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, 
vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5007,7 +5007,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5016,7 +5016,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5025,7 +5025,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -5034,7 +5034,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5043,7 +5043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5052,7 +5052,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void 
test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5061,7 +5061,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5070,7 +5070,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5079,7 +5079,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5088,7 +5088,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5097,7 +5097,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5106,7 +5106,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5115,7 +5115,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5124,7 +5124,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5133,7 +5133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5142,7 +5142,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5151,7 +5151,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5160,7 +5160,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5169,7 +5169,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5178,7 +5178,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5187,7 +5187,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5196,7 +5196,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5205,7 +5205,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5214,7 +5214,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -5223,7 +5223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5232,7 +5232,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5241,7 +5241,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5250,7 +5250,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5259,7 +5259,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5268,7 +5268,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5277,7 +5277,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -5286,7 +5286,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5295,7 +5295,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5304,7 +5304,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5313,7 +5313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, 
v5, vl); } @@ -5322,7 +5322,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5331,7 +5331,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5340,7 +5340,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -5349,7 +5349,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5358,7 +5358,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5367,7 +5367,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ 
-5376,7 +5376,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5385,7 +5385,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5394,7 +5394,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5403,7 +5403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5412,7 +5412,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5421,7 +5421,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t 
v5, vuint32mf2_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5430,7 +5430,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5439,7 +5439,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5448,7 +5448,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5457,7 +5457,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5466,7 +5466,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5475,7 +5475,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u32m1(uint32_t 
*base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5484,7 +5484,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5493,7 +5493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5502,7 +5502,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5511,7 +5511,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5520,7 +5520,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5529,7 +5529,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, 
vuint32m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5538,7 +5538,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -5547,7 +5547,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5556,7 +5556,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5565,7 +5565,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5574,7 +5574,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5583,7 +5583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t 
v6, size_t vl) { +void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5592,7 +5592,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5601,7 +5601,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -5610,7 +5610,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5619,7 +5619,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5628,7 +5628,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5637,7 +5637,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32m1 
(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5646,7 +5646,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5655,7 +5655,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5664,7 +5664,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -5673,7 +5673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5682,7 +5682,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5691,7 +5691,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], 
[[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -5700,7 +5700,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5709,7 +5709,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5718,7 +5718,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5727,7 +5727,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5736,7 +5736,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5745,7 +5745,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, 
vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5754,7 +5754,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5763,7 +5763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5772,7 +5772,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5781,7 +5781,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5790,7 +5790,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } @@ -5799,7 +5799,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u64m1(uint64_t *base, 
vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -5808,7 +5808,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5817,7 +5817,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5826,7 +5826,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5835,7 +5835,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5844,7 +5844,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5853,7 +5853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u64m1 (uint64_t *base, 
vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5862,7 +5862,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -5871,7 +5871,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5880,7 +5880,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5889,7 +5889,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } @@ -5898,7 +5898,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5907,7 +5907,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5916,7 +5916,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5925,7 +5925,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5934,7 +5934,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5943,7 +5943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5952,7 +5952,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5961,7 +5961,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2 
(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5970,7 +5970,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5979,7 +5979,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5988,7 +5988,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } @@ -5997,7 +5997,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -6006,7 +6006,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -6015,7 +6015,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -6024,7 +6024,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -6033,7 +6033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -6042,7 +6042,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -6051,7 +6051,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -6060,7 +6060,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } @@ -6069,7 +6069,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, 
size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -6078,7 +6078,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -6087,7144 +6087,8926 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg5ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, 
vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsoxseg7ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsoxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, 
vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2( +// 
CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return 
vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2 (float 
*base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m1 (float *base, vuint32m1_t 
bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { return vsoxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei32(base, bindex, v0, v1, v2, 
v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t 
v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsoxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1( +// CHECK-RV64-LABEL: 
@test_vsoxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t 
vl) { - return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, 
vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], 
[[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, 
vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t 
v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, 
int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, 
vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, 
vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsoxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, 
vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return 
vsoxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// 
+void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsoxseg2ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, 
vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return 
vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsoxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, 
v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
void +// +void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + 
return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, 
vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, 
v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, 
vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( 
[[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, 
int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, 
vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void 
test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, 
vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return 
vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t 
v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); 
} -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg3ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void 
test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { 
return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void 
test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, 
vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, 
size_t vl) { +void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, 
v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, 
v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( 
[[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, 
int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, 
vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, 
vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); 
} -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], 
[[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, 
vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t 
bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t 
v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, 
vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2_m 
(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, 
uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, 
v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, 
size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t 
v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, 
v0, v1, v2, vl); +void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, 
vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m2_m (vbool16_t 
mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, 
vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, 
base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( 
[[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, 
vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t 
v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, 
size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( 
[[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, 
vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t 
v6, vuint8mf8_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, 
vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
vl); +void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, 
vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void 
test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf2_m 
(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void 
test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei16(mask, 
base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void 
test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t 
v1, size_t vl) { +void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t 
v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, 
vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t 
bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void 
test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, 
vuint16m1_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, 
vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, 
uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg5ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return 
vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t 
bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } 
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, 
vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, 
vuint16m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m1_m (vbool32_t 
mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t 
bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t 
bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, 
v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return 
vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, 
vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, 
vuint32m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t 
v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return 
vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg3ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, 
vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, 
size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, 
vuint64m1_t v4, size_t vl) { - return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + 
return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void 
test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, 
vfloat16m1_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); -} - // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13233,7 +15015,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13242,7 +15024,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13251,7 +15033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13260,7 +15042,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13269,7 +15051,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13278,7 +15060,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13287,7 +15069,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13296,7 +15078,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13305,7 +15087,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13314,7 +15096,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13323,7 +15105,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13332,7 +15114,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13341,7 +15123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13350,7 +15132,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13359,7 +15141,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13368,7 +15150,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13377,7 +15159,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13386,7 +15168,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13395,7 +15177,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13404,7 +15186,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13413,7 +15195,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13422,7 +15204,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13431,7 +15213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13440,7 +15222,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return 
vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13449,7 +15231,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13458,7 +15240,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13467,7 +15249,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13476,7 +15258,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13485,7 +15267,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13494,7 +15276,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13503,7 +15285,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13512,7 +15294,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13521,7 +15303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13530,7 +15312,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13539,7 +15321,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, 
vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13548,7 +15330,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13557,7 +15339,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -13566,7 +15348,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13575,7 +15357,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13584,7 +15366,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13593,7 +15375,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13602,7 +15384,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsoxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13611,7 +15393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13620,7 +15402,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -13629,7 +15411,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13638,7 +15420,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13647,7 +15429,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13656,7 +15438,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13665,7 +15447,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13674,7 +15456,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13683,7 +15465,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -13692,7 +15474,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13701,7 +15483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13710,7 +15492,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13719,7 +15501,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -13728,7 +15510,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13737,7 +15519,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13746,7 +15528,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13755,7 +15537,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13764,7 +15546,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13773,7 +15555,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13782,7 +15564,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -13791,7 +15573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13800,7 +15582,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13809,7 +15591,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13818,7 +15600,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13827,7 +15609,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13836,7 +15618,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13845,7 +15627,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -13854,7 +15636,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13863,7 +15645,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13872,7 +15654,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13881,7 
+15663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13890,7 +15672,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13899,7 +15681,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13908,7 +15690,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13917,7 +15699,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13926,7 +15708,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13935,7 +15717,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13944,7 +15726,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13953,7 +15735,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13962,7 +15744,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13971,7 +15753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsoxseg2ei16(mask, base, 
bindex, v0, v1, vl); } @@ -13980,7 +15762,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13989,7 +15771,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13998,7 +15780,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -14007,7 +15789,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -14016,7 +15798,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -14025,7 +15807,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -14034,7 +15816,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -14043,7 +15825,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -14052,7 +15834,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14061,7 +15843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -14070,7 +15852,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t 
bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -14079,7 +15861,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -14088,7 +15870,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14097,7 +15879,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -14106,7 +15888,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -14115,7 +15897,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsoxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsoxseg7ei32(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, vl); } @@ -14124,7 +15906,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsoxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -14133,7 +15915,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -14142,7 +15924,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -14151,7 +15933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14160,7 +15942,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -14169,7 +15951,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { 
+void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -14178,7 +15960,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -14187,7 +15969,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14196,7 +15978,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -14205,7 +15987,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -14214,7 +15996,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, 
vfloat64m1_t v6, size_t vl) { return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -14223,7 +16005,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -14232,7 +16014,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -14241,7 +16023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -14250,7 +16032,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14259,7 +16041,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c @@ -406,7 +406,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( @@ -415,7 +415,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( @@ -424,7 +424,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( @@ -442,7 +442,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( @@ -451,7 +451,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( @@ -469,7 +469,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( @@ -478,7 +478,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( @@ -487,7 +487,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( @@ -496,7 +496,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( @@ -505,7 +505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( @@ -514,7 +514,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( @@ -532,7 +532,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( @@ -541,7 +541,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( @@ -559,7 +559,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( @@ -568,7 +568,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( @@ -577,7 +577,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( @@ -586,7 +586,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( @@ -595,7 +595,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra(mask, 
maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( @@ -604,7 +604,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( @@ -622,7 +622,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( @@ -631,7 +631,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( @@ -649,7 +649,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( @@ -658,7 +658,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( @@ -667,7 +667,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( @@ -676,7 +676,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( @@ -685,7 +685,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( @@ -694,7 +694,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( @@ -712,7 +712,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( @@ -721,7 +721,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( @@ -739,7 +739,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( @@ -748,7 +748,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( @@ -757,7 +757,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( @@ -766,7 +766,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( @@ -775,7 +775,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( @@ -784,7 +784,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vx_i64m8_m( @@ -793,5 +793,402 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, 
shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, size_t ta) { return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, 
size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m1_t test_vsra_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + 
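The vsra.c hunks above show the whole shape of the intrinsic-level change: the existing masked overload goes back to five arguments, while the new `_mt` overload takes a trailing tail-policy argument that lowers to the extra `i64 1` (tail-agnostic) operand visible in the CHECK lines. A minimal usage sketch follows; the wrapper names are hypothetical, and only `vsra`, the RVV types, and the `VE_TAIL_AGNOSTIC` macro come from the patch itself.

#include <riscv_vector.h>

// Hypothetical wrappers illustrating the two overloads exercised by the tests.
// Plain masked form: no policy argument (matches the test_vsra_vv_*_m tests).
vint32m1_t sra_masked(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1,
                      vuint32m1_t shift, size_t vl) {
  return vsra(mask, maskedoff, op1, shift, vl);
}

// Tail-policy form: the extra VE_TAIL_AGNOSTIC argument becomes the trailing
// "i64 1" policy operand on llvm.riscv.vsra.mask.* (matches the
// test_vsra_vv_*_mt tests).
vint32m1_t sra_masked_ta(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1,
                         vuint32m1_t shift, size_t vl) {
  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
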
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c @@ -406,7 +406,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( @@ -415,7 +415,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( @@ -424,7 +424,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( @@ -442,7 +442,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( @@ -451,7 +451,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( @@ -460,7 +460,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( @@ -469,7 +469,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( @@ -478,7 +478,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( @@ -487,7 +487,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsrl_vv_u8m4_m( @@ -496,7 +496,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( @@ -505,7 +505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( @@ -514,7 +514,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( @@ -532,7 +532,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( @@ -541,7 +541,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( @@ -550,7 +550,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( @@ -559,7 +559,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( @@ -568,7 +568,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( @@ -577,7 +577,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( @@ -586,7 +586,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return 
vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( @@ -595,7 +595,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( @@ -604,7 +604,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( @@ -622,7 +622,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( @@ -631,7 +631,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( @@ -640,7 +640,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( @@ -649,7 +649,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( @@ -658,7 +658,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( @@ -667,7 +667,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( @@ -676,7 +676,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( @@ -685,7 +685,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( @@ -694,7 +694,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( @@ -712,7 +712,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( @@ -721,7 +721,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( @@ -730,7 +730,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( @@ -739,7 +739,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( @@ -748,7 +748,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( @@ -757,7 +757,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( @@ -766,7 +766,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( @@ -775,7 +775,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, 
maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( @@ -784,7 +784,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); + return vsrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( @@ -793,5 +793,402 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vsrl_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl, size_t ta)
{ + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m8_t test_vsrl_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, 
op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { +void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { +void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -33,7 +33,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { +void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -43,7 +43,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { +void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -53,7 +53,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m2 (int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { +void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m4 (int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { +void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m8 (int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { +void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -83,7 +83,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { +void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { +void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -103,7 +103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { +void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -113,7 +113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m2 (int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { +void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -123,7 +123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m4 (int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { +void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -133,7 +133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m8 (int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { +void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -143,7 +143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { +void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { +void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m2 (int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { +void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, 
size_t vl) { return vsse32(base, bstride, value, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m4 (int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { +void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m8 (int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { +void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { +void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -203,7 +203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m2 (int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { +void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -213,7 +213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m4 (int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { +void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -223,7 +223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m8 (int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { +void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -233,7 +233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { +void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { +void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -253,7 +253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { +void 
test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { +void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m2 (uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { +void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m4 (uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { +void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -293,7 +293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m8 (uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { +void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { +void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -313,7 +313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { +void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -323,7 +323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { +void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m2 (uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { +void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m4 
(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { +void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m8 (uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { +void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { +void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -373,7 +373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { +void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -383,7 +383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m2 (uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { +void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -393,7 +393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m4 (uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { +void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m8 (uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { +void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -413,7 +413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { +void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -423,7 +423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m2 (uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { +void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * 
[[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m4 (uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { +void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m8 (uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { +void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { +void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -463,7 +463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { +void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -473,7 +473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { +void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -483,7 +483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { +void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -493,7 +493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { +void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -503,7 +503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m8 (_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { +void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { return vsse16(base, bstride, value, vl); } @@ -513,7 +513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { +void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { return 
vsse32(base, bstride, value, vl); } @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { +void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m2 (float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { +void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m4 (float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { +void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -553,7 +553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m8 (float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { +void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { return vsse32(base, bstride, value, vl); } @@ -563,7 +563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { +void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -573,7 +573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m2 (double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { +void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -583,7 +583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m4 (double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { +void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -593,7 +593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m8 (double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { +void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { return vsse64(base, bstride, value, vl); } @@ -603,7 +603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, 
size_t vl) { +void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { +void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -623,7 +623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { +void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { +void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -643,7 +643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { +void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -653,7 +653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m4_m (vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { +void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -663,7 +663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m8_m (vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { +void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -673,7 +673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { +void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -683,7 +683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsse16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { +void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -693,7 +693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { +void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { +void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -713,7 +713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m4_m (vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { +void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m8_m (vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { +void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -733,7 +733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { +void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -743,7 +743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { +void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -753,7 +753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { +void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -763,7 +763,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m4_m (vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { +void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -773,7 +773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m8_m (vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { +void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { +void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -793,7 +793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { +void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -803,7 +803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m4_m (vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { +void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m8_m (vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { +void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -823,7 +823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { +void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -833,7 +833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { +void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t 
*base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -843,7 +843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { +void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -853,7 +853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { +void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -863,7 +863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { +void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m4_m (vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { +void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m8_m (vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { +void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } @@ -893,7 +893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { +void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { +void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -913,7 +913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m1_m 
(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { +void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -923,7 +923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { +void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -933,7 +933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m4_m (vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { +void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -943,7 +943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m8_m (vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { +void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -953,7 +953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { +void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { +void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { +void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -983,7 +983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m4_m (vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { +void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m8_m (vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { +void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -1003,7 +1003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { +void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1013,7 +1013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { +void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1023,7 +1023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m4_m (vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { +void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1033,7 +1033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m8_m (vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { +void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1043,7 +1043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { +void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { +void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t 
vl) { +void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -1073,7 +1073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { +void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { +void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -1093,7 +1093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_f16m8_m (vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { +void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } @@ -1103,7 +1103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { +void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { +void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -1123,7 +1123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m2_m (vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { +void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -1133,7 +1133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m4_m (vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { +void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], 
i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m8_m (vbool4_t mask, float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { +void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { +void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1163,7 +1163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m2_m (vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { +void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m4_m (vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { +void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } @@ -1183,7 +1183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m8_m (vbool8_t mask, double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { +void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +experimental-v \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ // RUN: -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsseg2e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8mf8 (int8_t 
*base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsseg3e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsseg4e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -39,7 +39,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsseg5e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -48,7 +48,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsseg6e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -57,7 +57,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsseg7e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsseg8e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -75,7 +75,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsseg2e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -84,7 +84,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsseg3e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsseg4e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -102,7 +102,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsseg5e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -111,7 +111,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsseg6e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsseg7e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsseg8e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsseg2e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -147,7 +147,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsseg3e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsseg4e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsseg5e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsseg6e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsseg7e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsseg8e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -201,7 +201,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsseg2e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -210,7 +210,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsseg3e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -219,7 +219,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsseg4e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsseg5e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -237,7 +237,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsseg6e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -246,7 +246,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsseg7e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsseg8e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -264,7 +264,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8m2 (int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsseg2e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsseg3.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8m2 (int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsseg3e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -282,7 +282,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8m2 (int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsseg4e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -291,7 +291,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8m4 (int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vsseg2e8_v_i8m4(int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsseg2e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsseg3e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -318,7 +318,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsseg4e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -327,7 +327,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsseg5e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsseg5e16(base, v0, v1, v2, v3, v4, vl); } @@ -336,7 +336,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsseg6e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, 
vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); } @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsseg7e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -354,7 +354,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsseg8e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsseg2e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -372,7 +372,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsseg3e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsseg4e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsseg5e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsseg5e16(base, v0, v1, v2, v3, v4, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, 
vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsseg6e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsseg7e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsseg8e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsseg2e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsseg3e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsseg4e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsseg5e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsseg5e16(base, v0, v1, v2, v3, v4, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsseg6e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsseg7e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsseg8e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16m2 (int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsseg2e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16m2 (int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsseg3e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -507,7 +507,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16m2 (int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsseg4e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16m4 (int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsseg2e16_v_i16m4(int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsseg2e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsseg3e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsseg4e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsseg5e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsseg5e32(base, v0, v1, v2, v3, v4, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsseg6e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsseg7e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsseg8e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { 
return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -588,7 +588,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsseg2e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -597,7 +597,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsseg3e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -606,7 +606,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsseg4e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -615,7 +615,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsseg5e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsseg5e32(base, v0, v1, v2, v3, v4, vl); } @@ -624,7 +624,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsseg6e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsseg7e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -642,7 +642,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsseg8e32_v_i32m1(int32_t *base, vint32m1_t v0, 
vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -651,7 +651,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m2 (int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsseg2e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_i32m2 (int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsseg3e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -669,7 +669,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_i32m2 (int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsseg4e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -678,7 +678,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m4 (int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsseg2e32_v_i32m4(int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -687,7 +687,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsseg2e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -696,7 +696,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsseg3e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsseg3e64(base, v0, v1, v2, vl); } @@ -705,7 +705,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsseg4e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsseg4e64(base, v0, v1, v2, v3, vl); } @@ -714,7 +714,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsseg5e64_v_i64m1(int64_t *base, vint64m1_t 
v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsseg5e64(base, v0, v1, v2, v3, v4, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsseg6e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl); } @@ -732,7 +732,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsseg7e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -741,7 +741,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsseg8e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_i64m2 (int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsseg2e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -759,7 +759,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_i64m2 (int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsseg3e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsseg3e64(base, v0, v1, v2, vl); } @@ -768,7 +768,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_i64m2 (int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsseg4e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsseg4e64(base, v0, v1, v2, v3, vl); } @@ -777,7 +777,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_i64m4 (int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void 
test_vsseg2e64_v_i64m4(int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -786,7 +786,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsseg2e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -795,7 +795,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsseg3e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -804,7 +804,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsseg4e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsseg5e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -822,7 +822,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsseg6e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -831,7 +831,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsseg7e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, 
vuint8mf8_t v7, size_t vl) { +void test_vsseg8e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -849,7 +849,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsseg2e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -858,7 +858,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsseg3e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -867,7 +867,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsseg4e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -876,7 +876,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsseg5e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -885,7 +885,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsseg6e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -894,7 +894,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsseg7e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsseg8e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsseg8e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -912,7 +912,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsseg2e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -921,7 +921,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsseg3e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsseg4e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -939,7 +939,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsseg5e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -948,7 +948,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsseg6e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -957,7 +957,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsseg7e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -966,7 +966,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4i8.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsseg8e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -975,7 +975,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsseg2e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -984,7 +984,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsseg3e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsseg4e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -1002,7 +1002,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsseg5e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsseg5e8(base, v0, v1, v2, v3, v4, vl); } @@ -1011,7 +1011,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsseg6e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsseg6e8(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsseg7e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsseg7e8(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ 
-1029,7 +1029,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsseg8e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsseg8e8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1038,7 +1038,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8m2 (uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsseg2e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -1047,7 +1047,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8m2 (uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsseg3e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsseg3e8(base, v0, v1, v2, vl); } @@ -1056,7 +1056,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8m2 (uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsseg4e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsseg4e8(base, v0, v1, v2, v3, vl); } @@ -1065,7 +1065,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8m4 (uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vsseg2e8_v_u8m4(uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vsseg2e8(base, v0, v1, vl); } @@ -1074,7 +1074,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsseg2e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsseg3e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -1092,7 +1092,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsseg4e16_v_u16mf4(uint16_t *base, 
vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -1101,7 +1101,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsseg5e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsseg5e16(base, v0, v1, v2, v3, v4, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsseg6e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1119,7 +1119,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsseg7e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1128,7 +1128,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsseg8e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1137,7 +1137,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsseg2e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -1146,7 +1146,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsseg3e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -1155,7 +1155,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsseg4.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsseg4e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -1164,7 +1164,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsseg5e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsseg5e16(base, v0, v1, v2, v3, v4, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsseg6e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1182,7 +1182,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsseg7e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1191,7 +1191,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsseg8e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsseg2e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -1209,7 +1209,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsseg3e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsseg3e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -1218,7 +1218,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsseg4e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -1227,7 +1227,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsseg5e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsseg5e16(base, v0, v1, v2, v3, v4, vl); } @@ -1236,7 +1236,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsseg6e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1245,7 +1245,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsseg7e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1254,7 +1254,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsseg8e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1263,7 +1263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16m2 (uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsseg2e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return 
vsseg2e16(base, v0, v1, vl); } @@ -1272,7 +1272,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16m2 (uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsseg3e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsseg3e16(base, v0, v1, v2, vl); } @@ -1281,7 +1281,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16m2 (uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsseg4e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsseg4e16(base, v0, v1, v2, v3, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16m4 (uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsseg2e16_v_u16m4(uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsseg2e16(base, v0, v1, vl); } @@ -1299,7 +1299,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsseg2e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1308,7 +1308,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsseg3e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -1317,7 +1317,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsseg4e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -1326,7 +1326,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsseg5e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsseg5e32(base, v0, v1, v2, v3, v4, vl); } @@ -1335,7 +1335,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, 
vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsseg6e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1344,7 +1344,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsseg7e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1353,7 +1353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsseg8e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1362,7 +1362,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsseg2e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1371,7 +1371,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsseg3e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsseg4e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -1389,7 +1389,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsseg5e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsseg5e32(base, v0, v1, v2, v3, v4, vl); } @@ -1398,7 +1398,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsseg6e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1407,7 +1407,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsseg7e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1416,7 +1416,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsseg8e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1425,7 +1425,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32m2 (uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsseg2e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1434,7 +1434,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_u32m2 (uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsseg3e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -1443,7 +1443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_u32m2 (uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsseg4e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -1452,7 +1452,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32m4 (uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsseg2e32_v_u32m4(uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t 
vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1461,7 +1461,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsseg2e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -1470,7 +1470,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsseg3e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsseg3e64(base, v0, v1, v2, vl); } @@ -1479,7 +1479,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsseg4e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsseg4e64(base, v0, v1, v2, v3, vl); } @@ -1488,7 +1488,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsseg5e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsseg5e64(base, v0, v1, v2, v3, v4, vl); } @@ -1497,7 +1497,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsseg6e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1506,7 +1506,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsseg7e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1515,7 +1515,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, 
size_t vl) { +void test_vsseg8e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1524,7 +1524,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_u64m2 (uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsseg2e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_u64m2 (uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsseg3e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsseg3e64(base, v0, v1, v2, vl); } @@ -1542,7 +1542,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_u64m2 (uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsseg4e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsseg4e64(base, v0, v1, v2, v3, vl); } @@ -1551,16 +1551,241 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_u64m4 (uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsseg2e64_v_u64m4(uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, 
vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsseg5e16(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsseg5e16(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsseg5e16(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t 
v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m4(_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsseg2e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1569,7 +1794,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void 
test_vsseg3e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -1578,7 +1803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsseg4e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -1587,7 +1812,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsseg5e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsseg5e32(base, v0, v1, v2, v3, v4, vl); } @@ -1596,7 +1821,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsseg6e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1605,7 +1830,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsseg7e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1614,7 +1839,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsseg8e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1623,7 +1848,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsseg2e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return 
vsseg2e32(base, v0, v1, vl); } @@ -1632,7 +1857,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsseg3e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -1641,7 +1866,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsseg4e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -1650,7 +1875,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsseg5e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsseg5e32(base, v0, v1, v2, v3, v4, vl); } @@ -1659,7 +1884,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsseg6e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsseg6e32(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1668,7 +1893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsseg7e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsseg7e32(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1677,7 +1902,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsseg8e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsseg8e32(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1686,7 +1911,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], 
float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32m2 (float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsseg2e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1695,7 +1920,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32m2 (float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsseg3e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsseg3e32(base, v0, v1, v2, vl); } @@ -1704,7 +1929,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32m2 (float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsseg4e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsseg4e32(base, v0, v1, v2, v3, vl); } @@ -1713,7 +1938,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32m4 (float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsseg2e32_v_f32m4(float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsseg2e32(base, v0, v1, vl); } @@ -1722,7 +1947,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsseg2e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -1731,7 +1956,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsseg3e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsseg3e64(base, v0, v1, v2, vl); } @@ -1740,7 +1965,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsseg4e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsseg4e64(base, v0, v1, v2, v3, vl); } @@ -1749,7 +1974,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsseg5e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsseg5e64(base, v0, v1, v2, v3, v4, vl); } @@ -1758,7 +1983,7 @@ // CHECK-RV64-NEXT: call 
void @llvm.riscv.vsseg6.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsseg6e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsseg6e64(base, v0, v1, v2, v3, v4, v5, vl); } @@ -1767,7 +1992,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsseg7e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsseg7e64(base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1776,7 +2001,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsseg8e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsseg8e64(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1785,7 +2010,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_f64m2 (double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsseg2e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -1794,7 +2019,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_f64m2 (double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsseg3e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsseg3e64(base, v0, v1, v2, vl); } @@ -1803,7 +2028,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_f64m2 (double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsseg4e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsseg4e64(base, v0, v1, v2, v3, vl); } @@ -1812,7 +2037,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_f64m4 (double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsseg2e64_v_f64m4(double *base, 
vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsseg2e64(base, v0, v1, vl); } @@ -1821,7 +2046,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -1830,7 +2055,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -1839,7 +2064,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -1848,7 +2073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -1857,7 +2082,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -1866,7 +2091,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1875,7 +2100,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1884,7 +2109,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -1893,7 +2118,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -1902,7 +2127,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -1911,7 +2136,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -1920,7 +2145,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -1929,7 +2154,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8mf4_m 
(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1938,7 +2163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1947,7 +2172,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -1956,7 +2181,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -1965,7 +2190,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -1974,7 +2199,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -1983,7 +2208,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) 
{ +void test_vsseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -1992,7 +2217,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2001,7 +2226,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2010,7 +2235,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2019,7 +2244,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2028,7 +2253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2037,7 +2262,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t 
vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2046,7 +2271,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2055,7 +2280,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2064,7 +2289,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2073,7 +2298,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2082,7 +2307,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2091,7 +2316,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2100,7 +2325,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv32i8.i64( 
[[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_i8m4_m (vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vsseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2109,7 +2334,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2118,7 +2343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -2127,7 +2352,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -2136,7 +2361,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2145,7 +2370,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2154,7 +2379,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void 
test_vsseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2163,7 +2388,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2172,7 +2397,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2181,7 +2406,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -2190,7 +2415,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -2199,7 +2424,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2208,7 +2433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsseg6e16_v_i16mf2_m(vbool32_t 
mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2217,7 +2442,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2226,7 +2451,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2235,7 +2460,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2244,7 +2469,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -2253,7 +2478,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -2262,7 +2487,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsseg5e16_v_i16m1_m(vbool16_t mask, int16_t 
*base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2271,7 +2496,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2280,7 +2505,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2289,7 +2514,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2298,7 +2523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2307,7 +2532,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -2316,7 +2541,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t 
v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -2325,7 +2550,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_i16m4_m (vbool4_t mask, int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2334,7 +2559,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -2343,7 +2568,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -2352,7 +2577,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -2361,7 +2586,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2370,7 +2595,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2379,7 +2604,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2388,7 +2613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2397,7 +2622,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -2406,7 +2631,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -2415,7 +2640,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -2424,7 +2649,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2433,7 +2658,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsseg6e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2442,7 +2667,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2451,7 +2676,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2460,7 +2685,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -2469,7 +2694,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -2478,7 +2703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -2487,7 +2712,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m4_m (vbool8_t mask, int32_t *base, vint32m4_t v0, 
vint32m4_t v1, size_t vl) { +void test_vsseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -2496,7 +2721,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -2505,7 +2730,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsseg3e64(mask, base, v0, v1, v2, vl); } @@ -2514,7 +2739,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsseg4e64(mask, base, v0, v1, v2, v3, vl); } @@ -2523,7 +2748,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2532,7 +2757,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2541,7 +2766,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, 
vl); } @@ -2550,7 +2775,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2559,7 +2784,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -2568,7 +2793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsseg3e64(mask, base, v0, v1, v2, vl); } @@ -2577,7 +2802,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsseg4e64(mask, base, v0, v1, v2, v3, vl); } @@ -2586,7 +2811,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_i64m4_m (vbool16_t mask, int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -2595,7 +2820,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2604,7 +2829,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, 
size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2613,7 +2838,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2622,7 +2847,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2631,7 +2856,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2640,7 +2865,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2649,7 +2874,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2658,7 +2883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t 
vl) { +void test_vsseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2667,7 +2892,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2676,7 +2901,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2685,7 +2910,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2694,7 +2919,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2703,7 +2928,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2712,7 +2937,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, 
vuint8mf4_t v7, size_t vl) { +void test_vsseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2721,7 +2946,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2730,7 +2955,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2739,7 +2964,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2748,7 +2973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2757,7 +2982,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2766,7 +2991,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, 
vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2775,7 +3000,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2784,7 +3009,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2793,7 +3018,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2802,7 +3027,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2811,7 +3036,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsseg5e8(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2820,7 +3045,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsseg6e8(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ 
-2829,7 +3054,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsseg7e8(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2838,7 +3063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsseg8e8(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2847,7 +3072,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2856,7 +3081,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsseg3e8(mask, base, v0, v1, v2, vl); } @@ -2865,7 +3090,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsseg4e8(mask, base, v0, v1, v2, v3, vl); } @@ -2874,7 +3099,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vsseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vsseg2e8(mask, base, v0, v1, vl); } @@ -2883,7 +3108,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, 
vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2892,7 +3117,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -2901,7 +3126,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -2910,7 +3135,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2919,7 +3144,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2928,7 +3153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2937,7 +3162,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_u16mf4_m 
(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2946,7 +3171,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -2955,7 +3180,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -2964,7 +3189,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -2973,7 +3198,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); } @@ -2982,7 +3207,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -2991,7 +3216,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_u16mf2_m 
(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3000,7 +3225,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3009,7 +3234,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -3018,7 +3243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -3027,7 +3252,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -3036,7 +3261,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3045,7 +3270,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsseg6e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3054,7 +3279,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3063,7 +3288,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3072,7 +3297,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -3081,7 +3306,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsseg3e16(mask, base, v0, v1, v2, vl); } @@ -3090,7 +3315,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsseg4e16(mask, base, v0, v1, v2, v3, vl); } @@ -3099,7 +3324,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsseg2e16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsseg2e16(mask, base, v0, v1, vl); } @@ -3108,7 +3333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3117,7 +3342,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -3126,7 +3351,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -3135,7 +3360,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3144,7 +3369,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3153,7 +3378,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsseg7e32_v_u32mf2_m(vbool64_t 
mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3162,7 +3387,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3171,7 +3396,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3180,7 +3405,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -3189,7 +3414,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -3198,7 +3423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3207,7 +3432,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsseg6e32_v_u32m1_m(vbool32_t mask, 
uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3216,7 +3441,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3225,7 +3450,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3234,7 +3459,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3243,7 +3468,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -3252,7 +3477,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -3261,7 +3486,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return 
vsseg2e32(mask, base, v0, v1, vl); } @@ -3270,7 +3495,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -3279,7 +3504,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsseg3e64(mask, base, v0, v1, v2, vl); } @@ -3288,7 +3513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsseg4e64(mask, base, v0, v1, v2, v3, vl); } @@ -3297,7 +3522,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3306,7 +3531,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3315,7 +3540,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3324,7 +3549,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsseg8.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3333,7 +3558,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -3342,7 +3567,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsseg3e64(mask, base, v0, v1, v2, vl); } @@ -3351,7 +3576,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsseg4e64(mask, base, v0, v1, v2, v3, vl); } @@ -3360,16 +3585,241 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + 
return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, 
vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m2_m(vbool8_t 
mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsseg2e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3378,7 +3828,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsseg3e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -3387,7 +3837,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsseg4e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -3396,7 +3846,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsseg5e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3405,7 +3855,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_f32mf2_m (vbool64_t mask, float *base, 
vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsseg6e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3414,7 +3864,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsseg7e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3423,7 +3873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsseg8e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3432,7 +3882,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsseg2e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3441,7 +3891,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsseg3e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -3450,7 +3900,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsseg4e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -3459,7 +3909,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsseg5e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsseg5e32(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3468,7 +3918,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsseg6e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsseg6e32(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3477,7 +3927,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsseg7e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsseg7e32(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3486,7 +3936,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsseg8e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsseg8e32(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3495,7 +3945,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsseg2e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3504,7 +3954,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsseg3e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsseg3e32(mask, base, v0, v1, v2, vl); } @@ -3513,7 +3963,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsseg4.mask.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsseg4e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsseg4e32(mask, base, v0, v1, v2, v3, vl); } @@ -3522,7 +3972,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32m4_m (vbool8_t mask, float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsseg2e32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsseg2e32(mask, base, v0, v1, vl); } @@ -3531,7 +3981,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsseg2e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -3540,7 +3990,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsseg3e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsseg3e64(mask, base, v0, v1, v2, vl); } @@ -3549,7 +3999,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsseg4e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsseg4e64(mask, base, v0, v1, v2, v3, vl); } @@ -3558,7 +4008,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsseg5e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsseg5e64(mask, base, v0, v1, v2, v3, v4, vl); } @@ -3567,7 +4017,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg6e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void 
test_vsseg6e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsseg6e64(mask, base, v0, v1, v2, v3, v4, v5, vl); } @@ -3576,7 +4026,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg7e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsseg7e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsseg7e64(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3585,7 +4035,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg8e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsseg8e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsseg8e64(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3594,7 +4044,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsseg2e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } @@ -3603,7 +4053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsseg3e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsseg3e64(mask, base, v0, v1, v2, vl); } @@ -3612,7 +4062,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsseg4e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsseg4e64(mask, base, v0, v1, v2, v3, vl); } @@ -3621,7 +4071,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void 
test_vsseg2e64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsseg2e64(mask, base, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -135,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, - size_t vl) { +vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { return vssra(op1, shift, vl); } @@ -154,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, - size_t vl) { +vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { return vssra(op1, shift, vl); } @@ -245,8 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, - size_t vl) { +vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { return vssra(op1, shift, vl); } @@ -408,9 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( @@ -418,9 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( @@ -428,9 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( @@ -438,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( @@ -448,9 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( @@ -458,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( @@ -468,9 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( @@ -478,9 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( @@ -488,9 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssra_vx_i8m2_m( @@ -498,9 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( @@ -508,9 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( @@ -518,9 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( @@ -528,9 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( @@ -538,9 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( @@ -548,10 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vuint16mf4_t shift, - size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( @@ -559,9 +540,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( @@ -569,10 +549,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vuint16mf2_t shift, - size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( @@ -580,9 +558,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( @@ -590,9 +567,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( @@ -600,9 +576,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( @@ -610,9 +585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t 
test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( @@ -620,9 +594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( @@ -630,9 +603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( @@ -640,9 +612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( @@ -650,9 +621,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( @@ -660,9 +630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( @@ -670,10 +639,8 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vuint32mf2_t shift, - size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( @@ -681,9 +648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( @@ -691,9 +657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( @@ -701,9 +666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( @@ -711,9 +675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( @@ -721,9 +684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m2_t 
test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( @@ -731,9 +693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( @@ -741,9 +702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( @@ -751,9 +711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( @@ -761,9 +720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( @@ -771,9 +729,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( @@ -781,9 +738,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t 
test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( @@ -791,9 +747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( @@ -801,9 +756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( @@ -811,9 +765,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( @@ -821,9 +774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m( @@ -831,9 +783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m( @@ -841,7 +792,403 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, size_t shift, size_t vl) { +vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl, size_t ta) { + 
return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, size_t 
ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, size_t ta) { + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c @@ -135,8 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, - size_t vl) { +vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { return vssrl(op1, shift, vl); } @@ -154,8 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, - size_t vl) { +vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { return vssrl(op1, shift, vl); } @@ -245,8 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, - size_t vl) { +vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { return vssrl(op1, shift, vl); } @@ -408,10 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( @@ -419,9 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( @@ -429,10 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( @@ -440,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( @@ -450,10 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( @@ -461,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( @@ -471,9 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( @@ -481,9 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( @@ -491,9 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t 
mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( @@ -501,9 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( @@ -511,9 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( @@ -521,9 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( @@ -531,9 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( @@ -541,9 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( @@ -551,10 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( @@ -562,9 +540,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( @@ -572,10 +549,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( @@ -583,9 +558,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( @@ -593,10 +567,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( @@ -604,9 +576,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t 
mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( @@ -614,10 +585,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( @@ -625,9 +594,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( @@ -635,10 +603,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( @@ -646,9 +612,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( @@ -656,10 +621,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( @@ -667,9 +630,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( @@ -677,10 +639,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( @@ -688,9 +648,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( @@ -698,10 +657,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( @@ -709,9 +666,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( @@ -719,10 +675,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( @@ -730,9 +684,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( @@ -740,10 +693,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( @@ -751,9 +702,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( @@ -761,10 +711,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( @@ -772,9 +720,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( @@ -782,10 +729,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t shift, - size_t vl) { - return 
vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( @@ -793,9 +738,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( @@ -803,10 +747,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( @@ -814,9 +756,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( @@ -824,10 +765,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( @@ -835,9 +774,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( @@ -845,10 +783,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t shift, - size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( @@ -856,7 +792,403 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, size_t shift, size_t vl) { +vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m8_t test_vssrl_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, size_t ta) { + return
vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl, size_t ta) { + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t 
op1, size_t shift, size_t vl, size_t ta) {
+ return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vssrl_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl, size_t ta) {
+ return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vssrl_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, size_t ta) {
+ return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN: -target-feature +experimental-v \
+// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \
// RUN: -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
@@ -12,7 +12,7 @@
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg2e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
return vssseg2e8(base, bstride, v0, v1, vl);
}
@@ -21,7 +21,7 @@
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg3e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
return vssseg3e8(base, bstride, v0, v1, v2, vl);
}
@@ -30,7 +30,7 @@
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vssseg4e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
return vssseg4e8(base, bstride, v0, v1, v2, v3, vl);
}
@@ -39,7 +39,7 @@
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void
test_vssseg5e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vssseg5e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -48,7 +48,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -57,7 +57,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -75,7 +75,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -84,7 +84,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vssseg4e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -102,7 +102,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -111,7 +111,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -147,7 +147,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vssseg3e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -201,7 +201,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -219,7 +219,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -237,7 +237,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -246,7 +246,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -264,7 +264,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m2 (int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m2 (int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -282,7 +282,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m2 (int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -291,7 +291,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m4 (int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -318,7 +318,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -327,7 +327,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -336,7 +336,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -354,7 +354,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } 
@@ -372,7 +372,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vssseg8e16_v_i16mf2(int16_t 
*base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, 
vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m2 (int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16m2 (int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -507,7 +507,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m2 (int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vssseg4e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m4 (int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32mf2 (int32_t *base, 
ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -588,7 +588,7 
@@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -597,7 +597,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -606,7 +606,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -615,7 +615,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -624,7 +624,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -642,7 +642,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -651,7 +651,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m2 (int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m2 (int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -669,7 +669,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m2 (int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -678,7 +678,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m4 (int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -687,7 +687,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -696,7 +696,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vssseg3e64(base, bstride, v0, v1, v2, vl); } @@ -705,7 +705,7 @@ 
// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vssseg4e64(base, bstride, v0, v1, v2, v3, vl); } @@ -714,7 +714,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -732,7 +732,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -741,7 +741,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m2 (int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t 
bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -759,7 +759,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m2 (int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vssseg3e64(base, bstride, v0, v1, v2, vl); } @@ -768,7 +768,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m2 (int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vssseg4e64(base, bstride, v0, v1, v2, v3, vl); } @@ -777,7 +777,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m4 (int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -786,7 +786,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -795,7 +795,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -804,7 +804,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vssseg5e8_v_u8mf8(uint8_t 
*base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -822,7 +822,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -831,7 +831,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -849,7 +849,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -858,7 +858,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -867,7 +867,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, 
vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -876,7 +876,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -885,7 +885,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -894,7 +894,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -912,7 +912,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -921,7 +921,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -939,7 +939,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -948,7 +948,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -957,7 +957,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -966,7 +966,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, 
v1, v2, v3, v4, v5, v6, v7, vl); } @@ -975,7 +975,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -984,7 +984,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -1002,7 +1002,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vssseg5e8(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1011,7 +1011,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vssseg6e8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vssseg7e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1029,7 +1029,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vssseg8e8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1038,7 +1038,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m2 (uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -1047,7 +1047,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m2 (uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vssseg3e8(base, bstride, v0, v1, v2, vl); } @@ -1056,7 +1056,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m2 (uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vssseg4e8(base, bstride, v0, v1, v2, v3, vl); } @@ -1065,7 +1065,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m4 (uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vssseg2e8(base, bstride, v0, v1, vl); } @@ -1074,7 +1074,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vssseg2e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, 
v1, v2, vl); } @@ -1092,7 +1092,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -1101,7 +1101,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1119,7 +1119,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1128,7 +1128,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1137,7 +1137,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -1146,7 +1146,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -1155,7 +1155,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -1164,7 +1164,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1182,7 +1182,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1191,7 +1191,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -1209,7 +1209,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -1218,7 +1218,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -1227,7 +1227,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1236,7 +1236,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1245,7 +1245,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1254,7 +1254,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1263,7 +1263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m2 (uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -1272,7 +1272,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16m2 (uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vssseg3e16(base, bstride, v0, v1, v2, vl); } @@ -1281,7 +1281,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m2 (uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m4 (uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vssseg2e16(base, bstride, v0, v1, vl); } @@ -1299,7 +1299,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1308,7 +1308,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -1317,7 +1317,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -1326,7 +1326,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1335,7 +1335,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1344,7 +1344,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1353,7 +1353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1362,7 +1362,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1371,7 +1371,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -1389,7 +1389,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1398,7 +1398,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1407,7 +1407,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1416,7 +1416,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1425,7 +1425,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m2 (uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1434,7 +1434,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m2 (uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -1443,7 +1443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m2 (uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -1452,7 +1452,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m4 (uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1461,7 +1461,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -1470,7 +1470,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vssseg3e64(base, bstride, v0, v1, v2, vl); } @@ -1479,7 +1479,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vssseg4e64(base, bstride, v0, v1, v2, v3, vl); } @@ -1488,7 +1488,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1497,7 +1497,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1506,7 +1506,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1515,7 +1515,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1524,7 +1524,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m2 (uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m2 (uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vssseg3e64(base, bstride, v0, v1, v2, vl); } @@ -1542,7 +1542,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m2 (uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vssseg4e64(base, bstride, v0, v1, v2, v3, vl); } @@ -1551,16 +1551,241 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m4 (uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vssseg3e16(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vssseg3e16(base, bstride, 
v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m1(_Float16 *base, 
ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vssseg3e16(base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vssseg4e16(base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vssseg2e16(base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vssseg3e16(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1569,7 +1794,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -1578,7 +1803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -1587,7 +1812,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1596,7 +1821,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1605,7 +1830,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1614,7 +1839,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1623,7 +1848,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1632,7 +1857,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -1641,7 +1866,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vssseg4e32(base, 
bstride, v0, v1, v2, v3, vl); } @@ -1650,7 +1875,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vssseg5e32(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1659,7 +1884,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vssseg6e32(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1668,7 +1893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vssseg7e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vssseg7e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1677,7 +1902,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vssseg8e32(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1686,7 +1911,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m2 (float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1695,7 +1920,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m2 (float *base, ptrdiff_t bstride, vfloat32m2_t v0, 
vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vssseg3e32(base, bstride, v0, v1, v2, vl); } @@ -1704,7 +1929,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m2 (float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vssseg4e32(base, bstride, v0, v1, v2, v3, vl); } @@ -1713,7 +1938,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m4 (float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vssseg2e32(base, bstride, v0, v1, vl); } @@ -1722,7 +1947,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -1731,7 +1956,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vssseg3e64(base, bstride, v0, v1, v2, vl); } @@ -1740,7 +1965,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vssseg4e64(base, bstride, v0, v1, v2, v3, vl); } @@ -1749,7 +1974,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vssseg5e64(base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1758,7 +1983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vssseg6e64(base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1767,7 +1992,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vssseg7e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1776,7 +2001,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vssseg8e64(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1785,7 +2010,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m2 (double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vssseg2e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -1794,7 +2019,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m2 (double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vssseg3e64(base, bstride, v0, v1, v2, vl); } @@ -1803,7 +2028,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m2 (double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return 
vssseg4e64(base, bstride, v0, v1, v2, v3, vl); } @@ -1812,7 +2037,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m4 (double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vssseg2e64(base, bstride, v0, v1, vl); } @@ -1821,7 +2046,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -1830,7 +2055,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -1839,7 +2064,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -1848,7 +2073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1857,7 +2082,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ 
-1866,7 +2091,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1875,7 +2100,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1884,7 +2109,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -1893,7 +2118,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -1902,7 +2127,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -1911,7 +2136,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t 
v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1920,7 +2145,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1929,7 +2154,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1938,7 +2163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1947,7 +2172,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -1956,7 +2181,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t 
vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -1965,7 +2190,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -1974,7 +2199,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -1983,7 +2208,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -1992,7 +2217,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2001,7 +2226,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
@@ -2010,7 +2235,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2019,7 +2244,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2028,7 +2253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2037,7 +2262,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2046,7 +2271,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2055,7 +2280,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, 
vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2064,7 +2289,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2073,7 +2298,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2082,7 +2307,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_i8m2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2091,7 +2316,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_i8m2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2100,7 +2325,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_i8m4_m (vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2109,7 +2334,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return 
vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2118,7 +2343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -2127,7 +2352,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2136,7 +2361,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2145,7 +2370,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2154,7 +2379,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2163,7 +2388,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2172,7 +2397,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2181,7 +2406,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -2190,7 +2415,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2199,7 +2424,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2208,7 +2433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void 
test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2217,7 +2442,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2226,7 +2451,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2235,7 +2460,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2244,7 +2469,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -2253,7 +2478,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2262,7 
+2487,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2271,7 +2496,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vssseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2280,7 +2505,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2289,7 +2514,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2298,7 +2523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2307,7 +2532,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_i16m2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -2316,7 +2541,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_i16m2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2325,7 +2550,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_i16m4_m (vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2334,7 +2559,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -2343,7 +2568,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -2352,7 +2577,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2361,7 +2586,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg5e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2370,7 +2595,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2379,7 +2604,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2388,7 +2613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2397,7 +2622,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -2406,7 +2631,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, 
vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -2415,7 +2640,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2424,7 +2649,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2433,7 +2658,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2442,7 +2667,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2451,7 +2676,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, 
ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2460,7 +2685,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -2469,7 +2694,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_i32m2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -2478,7 +2703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_i32m2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2487,7 +2712,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_i32m4_m (vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -2496,7 +2721,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -2505,7 +2730,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vssseg3e64(mask, base, bstride, v0, v1, v2, vl); } @@ -2514,7 
+2739,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2523,7 +2748,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2532,7 +2757,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2541,7 +2766,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2550,7 +2775,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2559,7 +2784,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -2568,7 +2793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_i64m2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vssseg3e64(mask, base, bstride, v0, v1, v2, vl); } @@ -2577,7 +2802,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_i64m2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2586,7 +2811,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_i64m4_m (vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -2595,7 +2820,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2604,7 +2829,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2613,7 +2838,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf8_m 
(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2622,7 +2847,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2631,7 +2856,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2640,7 +2865,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2649,7 +2874,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2658,7 +2883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf4_m (vbool32_t mask, 
uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2667,7 +2892,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2676,7 +2901,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2685,7 +2910,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2694,7 +2919,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2703,7 +2928,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, 
v3, v4, v5, v6, vl); } @@ -2712,7 +2937,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2721,7 +2946,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2730,7 +2955,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2739,7 +2964,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2748,7 +2973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2757,7 +2982,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t 
v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2766,7 +2991,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2775,7 +3000,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2784,7 +3009,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2793,7 +3018,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2802,7 +3027,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return 
vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2811,7 +3036,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vssseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vssseg5e8(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2820,7 +3045,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vssseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vssseg6e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2829,7 +3054,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vssseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vssseg7e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2838,7 +3063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vssseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vssseg8e8(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2847,7 +3072,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2856,7 +3081,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv16i8.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e8_v_u8m2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vssseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vssseg3e8(mask, base, bstride, v0, v1, v2, vl); } @@ -2865,7 +3090,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e8_v_u8m2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vssseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vssseg4e8(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2874,7 +3099,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e8_v_u8m4_m (vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vssseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vssseg2e8(mask, base, bstride, v0, v1, vl); } @@ -2883,7 +3108,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2892,7 +3117,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -2901,7 +3126,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2910,7 +3135,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2919,7 +3144,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2928,7 +3153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2937,7 +3162,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2946,7 +3171,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -2955,7 +3180,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -2964,7 +3189,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -2973,7 +3198,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -2982,7 +3207,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -2991,7 +3216,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3000,7 +3225,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16mf2_m (vbool32_t mask, 
uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3009,7 +3234,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -3018,7 +3243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -3027,7 +3252,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3036,7 +3261,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vssseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3045,7 +3270,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vssseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return 
vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3054,7 +3279,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vssseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3063,7 +3288,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vssseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3072,7 +3297,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -3081,7 +3306,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e16_v_u16m2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vssseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); } @@ -3090,7 +3315,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e16_v_u16m2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vssseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3099,7 +3324,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg2e16_v_u16m4_m (vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vssseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vssseg2e16(mask, base, bstride, v0, v1, vl); } @@ -3108,7 +3333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3117,7 +3342,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -3126,7 +3351,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3135,7 +3360,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3144,7 +3369,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3153,7 +3378,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg7.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3162,7 +3387,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3171,7 +3396,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3180,7 +3405,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -3189,7 +3414,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3198,7 +3423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vssseg5e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vssseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3207,7 +3432,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vssseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3216,7 +3441,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vssseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3225,7 +3450,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vssseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3234,7 +3459,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3243,7 +3468,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_u32m2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, 
vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vssseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -3252,7 +3477,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_u32m2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vssseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3261,7 +3486,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_u32m4_m (vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vssseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3270,7 +3495,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -3279,7 +3504,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vssseg3e64(mask, base, bstride, v0, v1, v2, vl); } @@ -3288,7 +3513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3297,7 +3522,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, 
size_t vl) { +void test_vssseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3306,7 +3531,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vssseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3315,7 +3540,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vssseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3324,7 +3549,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vssseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3333,7 +3558,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -3342,7 +3567,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_u64m2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vssseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, 
vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vssseg3e64(mask, base, bstride, v0, v1, v2, vl); } @@ -3351,7 +3576,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_u64m2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vssseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3360,16 +3585,241 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_u64m4_m (vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vssseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, 
vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vssseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3378,7 +3828,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -3387,7 +3837,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3396,7 +3846,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3405,7 +3855,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vssseg6.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3414,7 +3864,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3423,7 +3873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3432,7 +3882,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3441,7 +3891,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -3450,7 +3900,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3459,7 +3909,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vssseg5e32(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3468,7 +3918,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vssseg6e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3477,7 +3927,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vssseg7e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3486,7 +3936,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vssseg8e32(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3495,7 +3945,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m2_m (vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3504,7 +3954,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32m2_m (vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vssseg3e32(mask, base, bstride, v0, v1, v2, vl); } @@ -3513,7 +3963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32m2_m (vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vssseg4e32(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3522,7 +3972,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32m4_m (vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vssseg2e32(mask, base, bstride, v0, v1, vl); } @@ -3531,7 +3981,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -3540,7 +3990,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vssseg3e64(mask, base, bstride, v0, v1, v2, vl); } @@ -3549,7 +3999,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3558,7 +4008,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vssseg5e64(mask, base, bstride, v0, v1, v2, v3, v4, vl); } @@ -3567,7 +4017,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg6e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vssseg6e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } @@ -3576,7 +4026,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg7e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vssseg7e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3585,7 +4035,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg8e64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vssseg8e64(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3594,7 +4044,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], 
double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m2_m (vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } @@ -3603,7 +4053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e64_v_f64m2_m (vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vssseg3e64(mask, base, bstride, v0, v1, v2, vl); } @@ -3612,7 +4062,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e64_v_f64m2_m (vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vssseg4e64(mask, base, bstride, v0, v1, v2, v3, vl); } @@ -3621,7 +4071,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e64_v_f64m4_m (vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vssseg2e64(mask, base, bstride, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c @@ -531,8 +531,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vssubu(op1, op2, vl); } @@ -550,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vssubu(op1, op2, vl); } @@ -641,8 +639,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vssubu(op1, op2, 
vl); } @@ -804,901 +801,1582 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) 
{ + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return 
vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t 
op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t 
maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t 
mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); 
+} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t 
op1, vuint64m8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vssub_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vssub_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vssub_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vssub_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vssub_vx_i8mf2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vssub_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vssub_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vssub_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vssub_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vssub_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vssub_vv_i8m4_mt(vbool2_t mask, 
vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vssub_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vssub_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vssub_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vssub_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vssub_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vssub_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vssub_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vssub_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vssub_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vssub_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vssub_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vssub_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vssub_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vssub_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vssub_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vssub_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, 
vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vssub_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vssub_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vssub_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vssub_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vssub_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vssub_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vssub_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vssub_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vssub_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vssub_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vssub_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vssub_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, 
vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vssub_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vssub_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vssub_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vssub_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( +// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vssub_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint8mf8_t test_vssubu_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vssubu_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint8mf4_t test_vssubu_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vssubu_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint8mf2_t test_vssubu_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vssubu_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vssubu_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vssubu_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vssubu_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vssubu_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vssubu_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t 
mask, vuint8m4_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vssubu_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vssubu_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vssubu_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vssubu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint16mf4_t test_vssubu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vssubu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( 
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vuint16mf2_t test_vssubu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vssubu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vssubu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vssubu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vssubu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t 
test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vssubu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vssubu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vssubu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vssubu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vssubu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vuint32mf2_t test_vssubu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint32m1_t test_vssubu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vssubu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint32m2_t test_vssubu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vssubu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint32m4_t test_vssubu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vssubu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vuint32m8_t test_vssubu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vssubu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vuint64m1_t test_vssubu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vssubu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vuint64m2_t test_vssubu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, 
op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vssubu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vuint64m4_t test_vssubu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vssubu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vuint64m8_t test_vssubu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m( +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vssubu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c @@ -802,7 +802,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( @@ -811,7 +811,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( @@ -901,7 +901,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( @@ -910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return 
vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, 
op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, 
vuint8m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vsub(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, 
size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vsub(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return 
vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return 
vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, 
size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vsub_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, 
vint8mf8_t value, size_t vl) { +void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -33,7 +33,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -43,7 +43,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -53,7 +53,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { +void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { +void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -73,7 +73,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m8 (int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { +void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -83,7 +83,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -103,7 +103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -113,7 +113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -123,7 +123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { +void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -133,7 +133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { +void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -143,7 +143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -163,7 +163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { +void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -193,7 +193,7 
@@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -203,7 +203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -213,7 +213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -223,7 +223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -233,7 +233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -253,7 +253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { +void 
test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m8 (int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { +void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -293,7 +293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -303,7 +303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -313,7 +313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -323,7 +323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { +void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m8 (int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { +void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -363,7 +363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -373,7 +373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -383,7 +383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -393,7 +393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { +void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -413,7 +413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -423,7 +423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -433,7 +433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei8_v_i32mf2(int32_t *base, 
vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -463,7 +463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -473,7 +473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -483,7 +483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -493,7 +493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -503,7 +503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -513,7 +513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -523,7 +523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -553,7 +553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -563,7 +563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -573,7 +573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -583,7 +583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -593,7 +593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -603,7 +603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -613,7 +613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return 
vsuxei64(base, bindex, value, vl); } @@ -623,7 +623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -643,7 +643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -653,7 +653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -663,7 +663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -673,7 +673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -683,7 +683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -693,7 +693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -703,7 +703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m8 (int64_t 
*base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -713,7 +713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -733,7 +733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -743,7 +743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -753,7 +753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -763,7 +763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -773,7 +773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -793,7 +793,7 @@ // CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -803,7 +803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -823,7 +823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -833,7 +833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { +void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -843,7 +843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { +void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -853,7 +853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m8 (uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { +void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -863,7 +863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei16_v_u8mf4(uint8_t *base, 
vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -893,7 +893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { +void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -913,7 +913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { +void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -923,7 +923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -933,7 +933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -943,7 +943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -953,7 +953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { +void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -983,7 +983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1003,7 +1003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1013,7 +1013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1023,7 +1023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1033,7 +1033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1043,7 +1043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { +void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t 
vl) { return vsuxei8(base, bindex, value, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { +void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m8 (uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { +void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1073,7 +1073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1093,7 +1093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1103,7 +1103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { +void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1113,7 +1113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { +void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1123,7 +1123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m8 (uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { +void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1133,7 +1133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1163,7 +1163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { +void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { +void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1183,7 +1183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1193,7 +1193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1203,7 +1203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1213,7 +1213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { +void 
test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1223,7 +1223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1233,7 +1233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1253,7 +1253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1263,7 +1263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1273,7 +1273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1283,7 +1283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1293,7 +1293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1313,7 +1313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1323,7 +1323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1343,7 +1343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1353,7 +1353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1363,7 +1363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1373,7 +1373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1383,7 +1383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m1 (uint32_t *base, 
vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1393,7 +1393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1403,7 +1403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1413,7 +1413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1433,7 +1433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1443,7 +1443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1453,7 +1453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1463,7 +1463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1473,7 
+1473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1483,7 +1483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1493,7 +1493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1503,7 +1503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1523,7 +1523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1543,7 +1543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1553,7 +1553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1563,7 +1563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1573,7 +1573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1583,7 +1583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1593,7 +1593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1603,7 +1603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1613,7 +1613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1623,7 +1623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1633,7 +1633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { 
return vsuxei16(base, bindex, value, vl); } @@ -1643,7 +1643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1653,7 +1653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1663,7 +1663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1673,7 +1673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1683,7 +1683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1693,7 +1693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1703,7 +1703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1713,7 +1713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1723,7 +1723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1733,7 +1733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1743,7 +1743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1753,7 +1753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1763,7 +1763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1773,7 +1773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1783,7 +1783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1793,7 +1793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1803,7 +1803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { +void 
test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1813,7 +1813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1823,7 +1823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1833,7 +1833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1843,7 +1843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1853,7 +1853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1863,7 +1863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1873,7 +1873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -1883,7 +1883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1893,7 +1893,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1903,7 +1903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1913,7 +1913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1923,7 +1923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -1933,7 +1933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1943,7 +1943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1953,7 +1953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1963,7 +1963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -1973,7 +1973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t 
vl) { +void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1983,7 +1983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -1993,7 +1993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -2003,7 +2003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } @@ -2013,7 +2013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -2023,7 +2023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -2033,7 +2033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -2043,7 +2043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } @@ -2053,7 +2053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -2063,7 +2063,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -2073,7 +2073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -2083,7 +2083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } @@ -2093,7 +2093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -2103,7 +2103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -2113,7 +2113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -2123,7 +2123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } @@ -2133,7 +2133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2143,7 +2143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2153,7 +2153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2163,7 +2163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2173,7 +2173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { +void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2183,7 +2183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { +void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2193,7 +2193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i8m8_m (vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { +void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2203,7 +2203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2213,7 +2213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2223,7 
+2223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2233,7 +2233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2243,7 +2243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { +void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2253,7 +2253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { +void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2263,7 +2263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2273,7 +2273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2283,7 +2283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2293,7 +2293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8m1_m (vbool8_t mask, int8_t *base, 
vuint32m4_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2303,7 +2303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { +void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2313,7 +2313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { +void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2323,7 +2323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { +void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2333,7 +2333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { +void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2343,7 +2343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { +void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2353,7 +2353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2363,7 +2363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2373,7 +2373,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2383,7 +2383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2393,7 +2393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { +void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2403,7 +2403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i16m8_m (vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { +void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2413,7 +2413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2423,7 +2423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2433,7 +2433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2443,7 +2443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m2_m (vbool8_t mask, 
int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2453,7 +2453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { +void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2463,7 +2463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i16m8_m (vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { +void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2473,7 +2473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2483,7 +2483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2493,7 +2493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2503,7 +2503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2513,7 +2513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { +void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { return vsuxei32(mask, base, 
bindex, value, vl); } @@ -2523,7 +2523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { +void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2533,7 +2533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { +void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2543,7 +2543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { +void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2553,7 +2553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { +void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2563,7 +2563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2573,7 +2573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2583,7 +2583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2593,7 +2593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsuxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2603,7 +2603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { +void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2613,7 +2613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2623,7 +2623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2633,7 +2633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2643,7 +2643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2653,7 +2653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { +void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2663,7 +2663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t 
value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2673,7 +2673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2683,7 +2683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2693,7 +2693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2703,7 +2703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { +void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2713,7 +2713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { +void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2723,7 +2723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { +void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2733,7 +2733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { +void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2743,7 +2743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { +void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2753,7 +2753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2763,7 +2763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2773,7 +2773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2783,7 +2783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2793,7 +2793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2803,7 +2803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2813,7 +2813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t 
*base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2823,7 +2823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2833,7 +2833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2843,7 +2843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2853,7 +2853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2863,7 +2863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -2873,7 +2873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { +void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2883,7 +2883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { +void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2893,7 +2893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { +void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2903,7 +2903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { +void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -2913,7 +2913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2923,7 +2923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2933,7 +2933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2943,7 +2943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2953,7 +2953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { +void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2963,7 +2963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { +void 
test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2973,7 +2973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { +void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -2983,7 +2983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -2993,7 +2993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3003,7 +3003,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3013,7 +3013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3023,7 +3023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { +void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3033,7 +3033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { +void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3043,7 +3043,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3053,7 +3053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3063,7 +3063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3073,7 +3073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3083,7 +3083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { +void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3093,7 +3093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { +void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3103,7 +3103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { +void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3113,7 +3113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, 
vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { +void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3123,7 +3123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { +void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3133,7 +3133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3143,7 +3143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3153,7 +3153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3163,7 +3163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { +void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3173,7 +3173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { +void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3183,7 +3183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { +void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ 
-3193,7 +3193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3203,7 +3203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3213,7 +3213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3223,7 +3223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { +void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3233,7 +3233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { +void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3243,7 +3243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { +void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3253,7 +3253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3263,7 +3263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3273,7 +3273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3283,7 +3283,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { +void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3293,7 +3293,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { +void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3303,7 +3303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { +void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3313,7 +3313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { +void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3323,7 +3323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { +void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3333,7 +3333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { +void 
test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3343,7 +3343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3353,7 +3353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3363,7 +3363,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3373,7 +3373,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3383,7 +3383,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { +void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3393,7 +3393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3403,7 +3403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3413,7 +3413,7 @@ // CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3423,7 +3423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3433,7 +3433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { +void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3443,7 +3443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3453,7 +3453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3463,7 +3463,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3473,7 +3473,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3483,7 +3483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { +void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3493,7 +3493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { +void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3503,7 +3503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { +void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3513,7 +3513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { +void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3523,7 +3523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { +void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3533,7 +3533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3543,7 +3543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3553,7 +3553,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, 
vuint64m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3563,7 +3563,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3573,7 +3573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3583,7 +3583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3593,7 +3593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3603,7 +3603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3613,7 +3613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3623,7 +3623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3633,7 +3633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3643,7 +3643,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3653,7 +3653,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { +void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3663,7 +3663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { +void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3673,7 +3673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { +void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3683,7 +3683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { +void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3693,7 +3693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3703,7 +3703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, 
vfloat16mf2_t value, size_t vl) { +void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3713,7 +3713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3723,7 +3723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3733,7 +3733,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3743,7 +3743,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3753,7 +3753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3763,7 +3763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3773,7 +3773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, 
value, vl); } @@ -3783,7 +3783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3793,7 +3793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3803,7 +3803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { +void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3813,7 +3813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3823,7 +3823,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3833,7 +3833,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3843,7 +3843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3853,7 +3853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { +void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -3863,7 +3863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { +void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3873,7 +3873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { +void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3883,7 +3883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { +void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3893,7 +3893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { +void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -3903,7 +3903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3913,7 +3913,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3923,7 +3923,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { +void 
test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3933,7 +3933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3943,7 +3943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -3953,7 +3953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3963,7 +3963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3973,7 +3973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3983,7 +3983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -3993,7 +3993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -4003,7 +4003,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4013,7 +4013,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4023,7 +4023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4033,7 +4033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4043,7 +4043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { +void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4053,7 +4053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { +void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4063,7 +4063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { +void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4073,7 +4073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m2_m (vbool16_t mask, float 
*base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { +void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4083,7 +4083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { +void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4093,7 +4093,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -4103,7 +4103,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -4113,7 +4113,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -4123,7 +4123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } @@ -4133,7 +4133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -4143,7 +4143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } 
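// ---------------------------------------------------------------------------
// Illustrative sketch, not taken from the patch hunks themselves: every test
// in this file calls the overloaded masked form of an indexed-store
// intrinsic, e.g. vsuxei16(mask, base, bindex, value, vl). A minimal
// standalone translation unit using that same overload, assuming only
// <riscv_vector.h> and the RVV target features this test file already
// requires in its RUN lines, could look like the following; the function
// name is a placeholder and the parameter types mirror one of the existing
// tests above.
//
//   #include <riscv_vector.h>
//
//   // Scatter the elements of `value` through `base` using the byte offsets
//   // in `bindex`; elements whose mask bit is clear perform no store, and
//   // `vl` bounds the number of elements processed.
//   void masked_indexed_store_sketch(vbool16_t mask, double *base,
//                                    vuint16m1_t bindex, vfloat64m4_t value,
//                                    size_t vl) {
//     vsuxei16(mask, base, bindex, value, vl);
//   }
// ---------------------------------------------------------------------------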
@@ -4153,7 +4153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -4163,7 +4163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } @@ -4173,7 +4173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4183,7 +4183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4193,7 +4193,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4203,7 +4203,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } @@ -4213,7 +4213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { +void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4223,7 +4223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { +void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4233,7 +4233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { +void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } @@ -4243,7 +4243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { +void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +experimental-v \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ // RUN: -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -30,7 +30,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -39,7 +39,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -48,7 +48,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -57,7 +57,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -66,7 +66,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -75,7 +75,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -84,7 +84,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsuxseg3ei8(base, 
bindex, v0, v1, v2, vl); } @@ -93,7 +93,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -102,7 +102,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -111,7 +111,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -120,7 +120,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -129,7 +129,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -138,7 +138,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { 
+void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -147,7 +147,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -156,7 +156,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -165,7 +165,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -174,7 +174,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -183,7 +183,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -192,7 +192,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, 
vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -201,7 +201,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -210,7 +210,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -219,7 +219,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -228,7 +228,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -237,7 +237,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -246,7 +246,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t 
v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -255,7 +255,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -264,7 +264,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -273,7 +273,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -282,7 +282,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -291,7 +291,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -300,7 +300,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -309,7 +309,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf8 (int8_t 
*base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -318,7 +318,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -327,7 +327,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -336,7 +336,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -345,7 +345,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -354,7 +354,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -363,7 +363,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -372,7 +372,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -507,7 +507,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -588,7 +588,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -597,7 +597,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -606,7 +606,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -615,7 +615,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -624,7 +624,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -633,7 +633,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, 
vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -642,7 +642,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -651,7 +651,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -660,7 +660,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -669,7 +669,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -678,7 +678,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -687,7 +687,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, 
vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -696,7 +696,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -705,7 +705,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -714,7 +714,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -723,7 +723,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -732,7 +732,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -741,7 +741,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -750,7 +750,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -759,7 +759,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -768,7 +768,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -777,7 +777,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -786,7 +786,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -795,7 +795,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -804,7 +804,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -813,7 +813,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -822,7 +822,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -831,7 +831,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -840,7 +840,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -849,7 +849,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -858,7 +858,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -867,7 +867,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -876,7 +876,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -885,7 +885,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -894,7 +894,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -903,7 +903,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { 
+void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -912,7 +912,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -921,7 +921,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -930,7 +930,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -939,7 +939,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -948,7 +948,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -957,7 +957,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, 
vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -966,7 +966,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -975,7 +975,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -984,7 +984,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -993,7 +993,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -1002,7 +1002,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1011,7 +1011,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1020,7 +1020,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1029,7 +1029,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1038,7 +1038,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1047,7 +1047,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1056,7 +1056,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -1065,7 +1065,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1074,7 +1074,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1083,7 +1083,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1092,7 +1092,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1101,7 +1101,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1110,7 +1110,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, 
size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1119,7 +1119,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -1128,7 +1128,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1137,7 +1137,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1146,7 +1146,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1155,7 +1155,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1164,7 +1164,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, 
vint16mf4_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1173,7 +1173,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1182,7 +1182,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -1191,7 +1191,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1200,7 +1200,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1209,7 +1209,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1218,7 +1218,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, 
vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1227,7 +1227,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1236,7 +1236,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1245,7 +1245,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -1254,7 +1254,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1263,7 +1263,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1272,7 +1272,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t 
vl) { +void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1281,7 +1281,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1290,7 +1290,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1299,7 +1299,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1308,7 +1308,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -1317,7 +1317,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -1326,7 +1326,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m2 (int16_t 
*base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -1335,7 +1335,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -1344,7 +1344,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -1353,7 +1353,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1362,7 +1362,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1371,7 +1371,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1380,7 +1380,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t 
v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1389,7 +1389,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1398,7 +1398,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1407,7 +1407,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -1416,7 +1416,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1425,7 +1425,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1434,7 +1434,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void 
test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1443,7 +1443,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1452,7 +1452,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1461,7 +1461,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1470,7 +1470,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -1479,7 +1479,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1488,7 +1488,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1497,7 +1497,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1506,7 +1506,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1515,7 +1515,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1524,7 +1524,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } 
@@ -1542,7 +1542,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -1551,7 +1551,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -1560,7 +1560,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -1569,7 +1569,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -1578,7 +1578,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1587,7 +1587,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1596,7 +1596,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void 
test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1605,7 +1605,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1614,7 +1614,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1623,7 +1623,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1632,7 +1632,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -1641,7 +1641,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1650,7 +1650,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1659,7 +1659,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1668,7 +1668,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1677,7 +1677,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1686,7 +1686,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1695,7 +1695,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, 
vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -1704,7 +1704,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1713,7 +1713,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1722,7 +1722,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1731,7 +1731,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1740,7 +1740,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1749,7 +1749,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, 
vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1758,7 +1758,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -1767,7 +1767,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -1776,7 +1776,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -1785,7 +1785,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -1794,7 +1794,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -1803,7 +1803,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1812,7 +1812,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1821,7 +1821,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1830,7 +1830,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1839,7 +1839,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1848,7 +1848,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1857,7 +1857,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, 
vint16mf2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -1866,7 +1866,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1875,7 +1875,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1884,7 +1884,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1893,7 +1893,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1902,7 +1902,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1911,7 +1911,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t 
v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1920,7 +1920,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -1929,7 +1929,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -1938,7 +1938,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -1947,7 +1947,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -1956,7 +1956,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -1965,7 +1965,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, 
vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -1974,7 +1974,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -1983,7 +1983,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -1992,7 +1992,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2001,7 +2001,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2010,7 +2010,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2019,7 +2019,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, 
vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2028,7 +2028,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2037,7 +2037,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2046,7 +2046,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2055,7 +2055,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2064,7 +2064,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2073,7 +2073,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2082,7 +2082,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2091,7 +2091,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2100,7 +2100,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2109,7 +2109,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2118,7 +2118,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2127,7 +2127,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
[[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2136,7 +2136,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2145,7 +2145,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2154,7 +2154,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2163,7 +2163,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2172,7 +2172,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2181,7 +2181,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsuxseg3ei16(base, 
bindex, v0, v1, v2, vl); } @@ -2190,7 +2190,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2199,7 +2199,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2208,7 +2208,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2217,7 +2217,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2226,7 +2226,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2235,7 +2235,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2244,7 +2244,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2253,7 +2253,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2262,7 +2262,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2271,7 +2271,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2280,7 +2280,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2289,7 +2289,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2298,7 +2298,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2307,7 +2307,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2316,7 +2316,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2325,7 +2325,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2334,7 +2334,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2343,7 +2343,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsuxseg3ei32(base, 
bindex, v0, v1, v2, vl); } @@ -2352,7 +2352,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2361,7 +2361,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2370,7 +2370,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2379,7 +2379,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2388,7 +2388,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2397,7 +2397,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2406,7 +2406,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2415,7 +2415,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2424,7 +2424,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2433,7 +2433,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2442,7 +2442,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2451,7 +2451,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2460,7 +2460,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2469,7 +2469,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2478,7 +2478,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2487,7 +2487,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2496,7 +2496,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -2505,7 +2505,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, 
vl); } @@ -2514,7 +2514,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2523,7 +2523,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2532,7 +2532,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2541,7 +2541,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2550,7 +2550,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2559,7 +2559,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg2ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -2568,7 +2568,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2577,7 +2577,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2586,7 +2586,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2595,7 +2595,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2604,7 +2604,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2613,7 +2613,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2622,7 +2622,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -2631,7 +2631,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2640,7 +2640,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2649,7 +2649,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -2658,7 +2658,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2667,7 +2667,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2676,7 +2676,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2685,7 +2685,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2694,7 +2694,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2703,7 +2703,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2712,7 +2712,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2721,7 +2721,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void 
test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2730,7 +2730,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -2739,7 +2739,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -2748,7 +2748,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -2757,7 +2757,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2766,7 +2766,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2775,7 +2775,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2784,7 +2784,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, 
vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2793,7 +2793,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2802,7 +2802,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2811,7 +2811,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2820,7 +2820,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2829,7 +2829,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -2838,7 +2838,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -2847,7 +2847,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -2856,7 +2856,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2865,7 +2865,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2874,7 +2874,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2883,7 +2883,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2892,7 +2892,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, 
vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -2901,7 +2901,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -2910,7 +2910,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -2919,7 +2919,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2928,7 +2928,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -2937,7 +2937,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -2946,7 +2946,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, 
vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -2955,7 +2955,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -2964,7 +2964,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -2973,7 +2973,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -2982,7 +2982,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -2991,7 +2991,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3000,7 +3000,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { return 
vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3009,7 +3009,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3018,7 +3018,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -3027,7 +3027,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -3036,7 +3036,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -3045,7 +3045,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -3054,7 +3054,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -3063,7 +3063,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf8 (uint8_t 
*base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3072,7 +3072,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3081,7 +3081,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3090,7 +3090,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3099,7 +3099,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3108,7 +3108,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, 
v5, v6, v7, vl); } @@ -3117,7 +3117,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -3126,7 +3126,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3135,7 +3135,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3144,7 +3144,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3153,7 +3153,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3162,7 +3162,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } 
@@ -3171,7 +3171,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3180,7 +3180,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -3189,7 +3189,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3198,7 +3198,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3207,7 +3207,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3216,7 +3216,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, 
v1, v2, v3, v4, v5, vl); } @@ -3225,7 +3225,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3234,7 +3234,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3243,7 +3243,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -3252,7 +3252,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3261,7 +3261,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3270,7 +3270,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { 
return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3279,7 +3279,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3288,7 +3288,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3297,7 +3297,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3306,7 +3306,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -3315,7 +3315,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -3324,7 +3324,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, 
vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -3333,7 +3333,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -3342,7 +3342,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -3351,7 +3351,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3360,7 +3360,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3369,7 +3369,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3378,7 +3378,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3387,7 +3387,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3396,7 +3396,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3405,7 +3405,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -3414,7 +3414,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3423,7 +3423,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3432,7 +3432,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ 
-3441,7 +3441,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3450,7 +3450,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3459,7 +3459,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3468,7 +3468,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -3477,7 +3477,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3486,7 +3486,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, 
vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3495,7 +3495,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3504,7 +3504,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3513,7 +3513,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3522,7 +3522,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3531,7 +3531,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -3540,7 +3540,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3549,7 +3549,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3558,7 +3558,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3567,7 +3567,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3576,7 +3576,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3585,7 +3585,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsuxseg8ei16(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3594,7 +3594,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -3603,7 +3603,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -3612,7 +3612,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -3621,7 +3621,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -3630,7 +3630,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -3639,7 +3639,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3648,7 +3648,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, 
vuint8mf8_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3657,7 +3657,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3666,7 +3666,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3675,7 +3675,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3684,7 +3684,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3693,7 +3693,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -3702,7 +3702,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3711,7 +3711,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3720,7 +3720,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3729,7 +3729,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3738,7 +3738,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3747,7 +3747,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) 
{ return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3756,7 +3756,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -3765,7 +3765,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3774,7 +3774,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3783,7 +3783,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3792,7 +3792,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3801,7 +3801,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, 
size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3810,7 +3810,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3819,7 +3819,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -3828,7 +3828,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3837,7 +3837,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3846,7 +3846,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3855,7 +3855,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, 
vuint8m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3864,7 +3864,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3873,7 +3873,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3882,7 +3882,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -3891,7 +3891,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -3900,7 +3900,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -3909,7 +3909,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ 
-3918,7 +3918,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -3927,7 +3927,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -3936,7 +3936,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -3945,7 +3945,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -3954,7 +3954,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -3963,7 +3963,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, 
size_t vl) { +void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -3972,7 +3972,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -3981,7 +3981,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -3990,7 +3990,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -3999,7 +3999,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4008,7 +4008,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4017,7 +4017,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, 
vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4026,7 +4026,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4035,7 +4035,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -4044,7 +4044,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4053,7 +4053,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4062,7 +4062,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4071,7 +4071,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf2 (uint8_t *base, vuint64m4_t 
bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4080,7 +4080,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4089,7 +4089,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4098,7 +4098,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -4107,7 +4107,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4116,7 +4116,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4125,7 +4125,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4134,7 +4134,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4143,7 +4143,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4152,7 +4152,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4161,7 +4161,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -4170,7 +4170,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4179,7 +4179,7 @@ 
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4188,7 +4188,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4197,7 +4197,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4206,7 +4206,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4215,7 +4215,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4224,7 +4224,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -4233,7 +4233,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4242,7 +4242,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4251,7 +4251,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4260,7 +4260,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4269,7 +4269,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4278,7 +4278,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4287,7 +4287,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -4296,7 +4296,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4305,7 +4305,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4314,7 +4314,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4323,7 +4323,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4332,7 +4332,7 @@ 
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4341,7 +4341,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4350,7 +4350,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -4359,7 +4359,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -4368,7 +4368,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -4377,7 +4377,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -4386,7 +4386,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -4395,7 +4395,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4404,7 +4404,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4413,7 +4413,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4422,7 +4422,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4431,7 +4431,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return 
vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4440,7 +4440,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4449,7 +4449,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -4458,7 +4458,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4467,7 +4467,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4476,7 +4476,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4485,7 +4485,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void 
test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4494,7 +4494,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4503,7 +4503,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4512,7 +4512,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -4521,7 +4521,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -4530,7 +4530,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4539,7 +4539,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4548,7 +4548,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4557,7 +4557,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4566,7 +4566,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4575,7 +4575,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -4584,7 +4584,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, 
v2, vl); } @@ -4593,7 +4593,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -4602,7 +4602,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -4611,7 +4611,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -4620,7 +4620,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4629,7 +4629,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4638,7 +4638,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4647,7 +4647,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg6ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4656,7 +4656,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4665,7 +4665,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4674,7 +4674,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -4683,7 +4683,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4692,7 +4692,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } 
@@ -4701,7 +4701,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4710,7 +4710,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4719,7 +4719,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4728,7 +4728,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4737,7 +4737,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -4746,7 +4746,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m1 
(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4755,7 +4755,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4764,7 +4764,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4773,7 +4773,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4782,7 +4782,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4791,7 +4791,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { 
return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4800,7 +4800,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -4809,7 +4809,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -4818,7 +4818,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -4827,7 +4827,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -4836,7 +4836,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -4845,7 +4845,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4854,7 +4854,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void 
test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4863,7 +4863,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4872,7 +4872,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4881,7 +4881,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4890,7 +4890,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4899,7 +4899,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -4908,7 +4908,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4917,7 +4917,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4926,7 +4926,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4935,7 +4935,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -4944,7 +4944,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -4953,7 +4953,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, 
vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -4962,7 +4962,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -4971,7 +4971,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -4980,7 +4980,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -4989,7 +4989,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -4998,7 +4998,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5007,7 +5007,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1 (uint16_t *base, 
vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5016,7 +5016,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5025,7 +5025,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -5034,7 +5034,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5043,7 +5043,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5052,7 +5052,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5061,7 +5061,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t 
v2, size_t vl) { +void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5070,7 +5070,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5079,7 +5079,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5088,7 +5088,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5097,7 +5097,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5106,7 +5106,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsuxseg8ei8(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5115,7 +5115,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5124,7 +5124,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5133,7 +5133,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5142,7 +5142,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5151,7 +5151,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5160,7 +5160,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return 
vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5169,7 +5169,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5178,7 +5178,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5187,7 +5187,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5196,7 +5196,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5205,7 +5205,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5214,7 +5214,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5223,7 +5223,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg3ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5232,7 +5232,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5241,7 +5241,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5250,7 +5250,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5259,7 +5259,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5268,7 +5268,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t 
v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5277,7 +5277,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5286,7 +5286,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5295,7 +5295,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5304,7 +5304,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5313,7 +5313,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5322,7 +5322,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void 
test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5331,7 +5331,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5340,7 +5340,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5349,7 +5349,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5358,7 +5358,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5367,7 +5367,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5376,7 +5376,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, 
vl); } @@ -5385,7 +5385,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5394,7 +5394,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5403,7 +5403,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5412,7 +5412,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5421,7 +5421,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5430,7 +5430,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, 
vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5439,7 +5439,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -5448,7 +5448,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5457,7 +5457,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5466,7 +5466,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5475,7 +5475,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5484,7 +5484,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg7ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5493,7 +5493,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5502,7 +5502,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -5511,7 +5511,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5520,7 +5520,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5529,7 +5529,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -5538,7 +5538,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, 
vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -5547,7 +5547,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5556,7 +5556,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5565,7 +5565,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5574,7 +5574,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5583,7 +5583,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5592,7 +5592,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
[[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5601,7 +5601,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -5610,7 +5610,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5619,7 +5619,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5628,7 +5628,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5637,7 +5637,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5646,7 +5646,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5655,7 +5655,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5664,7 +5664,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -5673,7 +5673,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -5682,7 +5682,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -5691,7 +5691,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -5700,7 +5700,7 @@ // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5709,7 +5709,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5718,7 +5718,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5727,7 +5727,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5736,7 +5736,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5745,7 +5745,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5754,7 +5754,7 @@ // CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5763,7 +5763,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5772,7 +5772,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } @@ -5781,7 +5781,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } @@ -5790,7 +5790,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } @@ -5799,7 +5799,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5808,7 +5808,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, 
vuint64m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5817,7 +5817,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5826,7 +5826,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5835,7 +5835,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5844,7 +5844,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5853,7 +5853,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, 
v6, v7, vl); } @@ -5862,7 +5862,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5871,7 +5871,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } @@ -5880,7 +5880,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } @@ -5889,7 +5889,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } @@ -5898,7 +5898,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -5907,7 +5907,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5916,7 +5916,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, 
vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5925,7 +5925,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } @@ -5934,7 +5934,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -5943,7 +5943,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -5952,7 +5952,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -5961,7 +5961,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -5970,7 +5970,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } @@ -5979,7 +5979,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } @@ -5988,7 +5988,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } @@ -5997,7 +5997,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -6006,7 +6006,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -6015,7 +6015,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -6024,7 +6024,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { return vsuxseg5ei64(base, 
bindex, v0, v1, v2, v3, v4, vl); } @@ -6033,7 +6033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -6042,7 +6042,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -6051,7 +6051,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -6060,7 +6060,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } @@ -6069,7 +6069,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } @@ -6078,7 +6078,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t 
vl) { +void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } @@ -6087,7144 +6087,8926 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t 
vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { 
+void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsuxseg7ei8_v_f16m1(_Float16 
*base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2( +// CHECK-RV64-LABEL: 
@test_vsuxseg7ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32m1 (float *base, vuint16mf2_t 
bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, 
vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, 
vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { return vsuxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( 
[[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei32(base, 
bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, 
vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { return vsuxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); +void 
test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); +void 
test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return 
vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, 
vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_f32m4(float 
*base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, 
vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t 
v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, 
vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return 
vsuxseg3ei8(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei8(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei8(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei16(base, bindex, v0, 
v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei16(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsuxseg3ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei32(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei64(base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+ return vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+ return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+ return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+ return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+ return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+ return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret
void +// +void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, 
vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, 
vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, 
vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsuxseg7ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, 
vint8mf8_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return 
vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t 
*base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, 
int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return 
vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, 
vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, 
int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, 
vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return 
vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { +void 
test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { return 
vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], 
[[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, 
int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { 
+void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t 
v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, 
vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m2_m 
(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, 
vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t 
bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t 
v0, vint32m1_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( +// 
CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, 
size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t 
v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg4ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t 
*base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, 
vint64m1_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg4ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, 
vint32mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, 
v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t 
bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, 
vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, 
vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, 
v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { 
- return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, 
vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - 
return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t 
v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, 
vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg6ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf8_m 
(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, 
v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t 
v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void 
test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg3ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void 
test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, 
vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m( +// 
CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void 
test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t 
v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// 
CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, 
vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { +void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { +void 
test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { 
+void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, 
vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t 
*base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { +void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { +void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { +void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { +void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, 
uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, 
vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg6ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t 
bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return 
vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m( +// 
CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, 
size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, 
vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, 
vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, 
v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, 
v0, v1, v2, vl); +void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, 
vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, 
vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, 
vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, 
vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return 
vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, 
vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, 
vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, 
vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t 
bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m( 
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { +void 
test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); -} - // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13233,7 +15015,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13242,7 +15024,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13251,7 +15033,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13260,7 +15042,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, 
float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13269,7 +15051,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13278,7 +15060,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13287,7 +15069,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13296,7 +15078,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13305,7 +15087,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, 
vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13314,7 +15096,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13323,7 +15105,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13332,7 +15114,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13341,7 +15123,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13350,7 +15132,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13359,7 +15141,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13368,7 +15150,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13377,7 +15159,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13386,7 +15168,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13395,7 +15177,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13404,7 +15186,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, 
vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13413,7 +15195,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13422,7 +15204,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13431,7 +15213,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13440,7 +15222,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13449,7 +15231,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( 
[[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13458,7 +15240,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13467,7 +15249,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13476,7 +15258,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13485,7 +15267,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13494,7 +15276,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, 
vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13503,7 +15285,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13512,7 +15294,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13521,7 +15303,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13530,7 +15312,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13539,7 +15321,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13548,7 +15330,7 @@ // CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13557,7 +15339,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -13566,7 +15348,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13575,7 +15357,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13584,7 +15366,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13593,7 +15375,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float 
*base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsuxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13602,7 +15384,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13611,7 +15393,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13620,7 +15402,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -13629,7 +15411,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13638,7 +15420,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void 
test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13647,7 +15429,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13656,7 +15438,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13665,7 +15447,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13674,7 +15456,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13683,7 +15465,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void 
test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -13692,7 +15474,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13701,7 +15483,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -13710,7 +15492,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13719,7 +15501,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -13728,7 +15510,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13737,7 +15519,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, 
vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { +void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13746,7 +15528,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { +void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13755,7 +15537,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { +void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13764,7 +15546,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { +void test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13773,7 +15555,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13782,7 +15564,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m1_m 
(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -13791,7 +15573,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13800,7 +15582,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13809,7 +15591,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13818,7 +15600,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13827,7 +15609,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t 
bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13836,7 +15618,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13845,7 +15627,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -13854,7 +15636,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13863,7 +15645,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -13872,7 +15654,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13881,7 +15663,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13890,7 +15672,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13899,7 +15681,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -13908,7 +15690,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -13917,7 +15699,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -13926,7 +15708,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, 
double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -13935,7 +15717,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13944,7 +15726,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); } @@ -13953,7 +15735,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13962,7 +15744,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); } @@ -13971,7 +15753,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -13980,7 +15762,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -13989,7 +15771,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -13998,7 +15780,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -14007,7 +15789,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -14016,7 +15798,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -14025,7 +15807,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -14034,7 +15816,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -14043,7 +15825,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); } @@ -14052,7 +15834,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14061,7 +15843,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); } @@ -14070,7 +15852,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -14079,7 +15861,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -14088,7 +15870,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14097,7 +15879,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -14106,7 +15888,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -14115,7 +15897,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsuxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -14124,7 +15906,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
[[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -14133,7 +15915,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -14142,7 +15924,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); } @@ -14151,7 +15933,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14160,7 +15942,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsuxseg2ei32(mask, base, bindex, v0, v1, vl); } @@ -14169,7 +15951,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -14178,7 +15960,7 @@ // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -14187,7 +15969,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14196,7 +15978,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { +void test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); } @@ -14205,7 +15987,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { +void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } @@ -14214,7 +15996,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { +void test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } @@ -14223,7 +16005,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { +void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } @@ -14232,7 +16014,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } @@ -14241,7 +16023,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { +void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); } @@ -14250,7 +16032,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { +void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); } @@ -14259,7 +16041,7 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { +void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c @@ -549,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, 
vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vwaddu_vv(op1, op2, vl); } @@ -568,8 +567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { return vwaddu_wv(op1, op2, vl); } @@ -587,8 +585,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vwaddu_vv(op1, op2, vl); } @@ -606,8 +603,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { return vwaddu_wv(op1, op2, vl); } @@ -769,8 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vwaddu_vv(op1, op2, vl); } @@ -788,8 +783,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vwaddu_wv(op1, op2, vl); } @@ -807,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vwaddu_vv(op1, op2, vl); } @@ -952,8 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vwaddu_vv(op1, op2, vl); } @@ -1097,1225 +1089,2158 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, 
size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vwadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, 
vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t 
op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t 
op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint16mf4_t test_vwadd_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint8mf8_t op2, size_t vl) { +vint16mf4_t test_vwadd_wv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int8_t op2, size_t vl) { +vint16mf4_t test_vwadd_wx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } 
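+// Illustrative usage sketch only (not one of the generated FileCheck cases;
+// "widen_both_ways" is a hypothetical helper name). It shows the two
+// overload shapes exercised in this file: the five-argument masked form
+// leaves the tail policy implicit, while the six-argument form passes it
+// explicitly as VE_TAIL_AGNOSTIC; per the checks above, both are expected
+// to lower to the masked intrinsic with a trailing policy operand of i64 1.
+static inline vint64m8_t widen_both_ways(vbool8_t mask, vint64m8_t maskedoff,
+                                         vint32m4_t op1, vint32m4_t op2,
+                                         size_t vl) {
+  // Five-argument overload: no explicit tail-policy operand in the source.
+  vint64m8_t a = vwadd_vv(mask, maskedoff, op1, op2, vl);
+  // Six-argument overload: tail policy supplied by the caller.
+  vint64m8_t b = vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  // Widening .w form combines the two wide results with the narrow op2.
+  return vwadd_wv(mask, a, b, op2, vl);
+}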
-// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwadd_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwadd_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwadd_wv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwadd_wx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwadd_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwadd_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwadd_wv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwadd_wx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwadd_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwadd_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwadd_wv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( +// CHECK-RV64-LABEL: 
@test_vwadd_wx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwadd_wx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwadd_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwadd_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwadd_wv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwadd_wx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { 
+vint16m8_t test_vwadd_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwadd_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwadd_wv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwadd_wx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vwadd_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint32mf2_t test_vwadd_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vwadd_wv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int16_t op2, size_t vl) { +vint32mf2_t test_vwadd_wx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwadd_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint32m1_t test_vwadd_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwadd_wv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int16_t op2, size_t vl) { +vint32m1_t 
test_vwadd_wx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t test_vwadd_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwadd_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t test_vwadd_wv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwadd_wx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwadd_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwadd_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwadd_wv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwadd_wx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwadd_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwadd_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwadd_wv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, 
vint32m8_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwadd_wx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwadd_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwadd_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwadd_wv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwadd_wx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwadd_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwadd_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwadd_wv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwadd_wx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwadd_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwadd_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, 
size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwadd_wv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwadd_wx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwadd_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwadd_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwadd_wv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwadd_wx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl, size_t ta) { return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwaddu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwaddu_wx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, 
vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwaddu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwaddu_wx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint16m1_t test_vwaddu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint16m1_t test_vwaddu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint8mf2_t op2, - size_t vl) { +vuint16m1_t test_vwaddu_wv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint8_t op2, size_t vl) { +vuint16m1_t test_vwaddu_wx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint16m2_t test_vwaddu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint16m2_t test_vwaddu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint8m1_t op2, size_t vl) { +vuint16m2_t test_vwaddu_wv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint8_t op2, size_t vl) { 
+vuint16m2_t test_vwaddu_wx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint16m4_t test_vwaddu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint16m4_t test_vwaddu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint8m2_t op2, size_t vl) { +vuint16m4_t test_vwaddu_wv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint8_t op2, size_t vl) { +vuint16m4_t test_vwaddu_wx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint16m8_t test_vwaddu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_mt( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint16m8_t test_vwaddu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint8m4_t op2, size_t vl) { +vuint16m8_t test_vwaddu_wv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint8_t op2, size_t vl) { +vuint16m8_t test_vwaddu_wx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwaddu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint32mf2_t test_vwaddu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwaddu_wv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint16_t op2, - size_t vl) { +vuint32mf2_t test_vwaddu_wx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vwaddu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint32m1_t test_vwaddu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vwaddu_wv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint16_t op2, size_t vl) { +vuint32m1_t test_vwaddu_wx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl, size_t ta) { return 
vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vwaddu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint32m2_t test_vwaddu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vwaddu_wv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint16_t op2, size_t vl) { +vuint32m2_t test_vwaddu_wx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vwaddu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint32m4_t test_vwaddu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vwaddu_wv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint16_t op2, size_t vl) { +vuint32m4_t test_vwaddu_wx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t test_vwaddu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint32m8_t test_vwaddu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t 
test_vwaddu_wv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint16_t op2, size_t vl) { +vuint32m8_t test_vwaddu_wx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint64m1_t test_vwaddu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint64m1_t test_vwaddu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint32mf2_t op2, - size_t vl) { +vuint64m1_t test_vwaddu_wv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint32_t op2, size_t vl) { +vuint64m1_t test_vwaddu_wx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( +// CHECK-RV64-LABEL: 
@test_vwaddu_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint64m2_t test_vwaddu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint64m2_t test_vwaddu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint32m1_t op2, - size_t vl) { +vuint64m2_t test_vwaddu_wv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint32_t op2, size_t vl) { +vuint64m2_t test_vwaddu_wx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint64m4_t test_vwaddu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint64m4_t test_vwaddu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint32m2_t op2, - size_t vl) { +vuint64m4_t test_vwaddu_wv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint32_t op2, size_t vl) { +vuint64m4_t test_vwaddu_wx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint64m8_t test_vwaddu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint64m8_t test_vwaddu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint32m4_t op2, - size_t vl) { +vuint64m8_t test_vwaddu_wv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vwaddu_wv(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint32_t op2, size_t vl) { +vuint64m8_t test_vwaddu_wx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl, size_t ta) { return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c @@ -9,7 +9,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { +vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { +vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { +vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -36,7 +36,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { +vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -45,7 +45,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { +vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -54,7 +54,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { +vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -63,7 +63,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -72,7 +72,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, 
size_t vl) { return vwcvtu_x(src, vl); } @@ -81,7 +81,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { +vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -90,7 +90,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { +vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -99,7 +99,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { +vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -108,7 +108,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { +vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -117,7 +117,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { +vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -126,7 +126,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { +vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -135,7 +135,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { +vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -144,7 +144,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { +vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -153,7 +153,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { +vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -162,7 +162,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -171,7 +171,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { +vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { +vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -189,7 +189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { +vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -198,7 +198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { +vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { +vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -216,7 +216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { +vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -225,7 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { +vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -234,7 +234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { +vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) { return vwcvt_x(src, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { +vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -252,7 +252,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { +vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -261,7 +261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { +vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -270,7 
+270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { +vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) { return vwcvtu_x(src, vl); } @@ -279,8 +279,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( @@ -288,8 +288,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( @@ -297,8 +297,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( @@ -306,8 +306,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( @@ -315,8 +315,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( @@ -324,8 +324,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, 
size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( @@ -333,8 +333,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( @@ -342,8 +342,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( @@ -351,8 +351,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( @@ -360,8 +360,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( @@ -369,8 +369,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( @@ -378,8 +378,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, 
vuint8m4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( @@ -387,8 +387,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( @@ -396,8 +396,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( @@ -405,8 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( @@ -414,8 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( @@ -423,8 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( @@ -432,8 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, 
size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( @@ -441,8 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( @@ -450,8 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( @@ -459,8 +459,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( @@ -468,8 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( @@ -477,8 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( @@ -486,8 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, 
vint32m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( @@ -495,8 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( @@ -504,8 +504,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { + return vwcvt_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( @@ -513,8 +513,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( @@ -522,8 +522,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( @@ -531,8 +531,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( @@ -540,7 +540,277 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, 
vuint32m4_t src, size_t vl) { +vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { + return vwcvtu_x(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwcvt_x_x_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwcvt_x_x_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwcvt_x_x_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwcvt_x_x_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwcvt_x_x_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwcvt_x_x_v_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwcvtu_x_x_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwcvtu_x_x_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwcvtu_x_x_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwcvtu_x_x_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwcvt_x_x_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwcvt_x_x_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwcvt_x_x_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl, size_t ta) { + 
return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwcvt_x_x_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwcvt_x_x_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwcvtu_x_x_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwcvtu_x_x_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwcvtu_x_x_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwcvtu_x_x_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwcvt_x_x_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwcvt_x_x_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwcvt_x_x_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl, size_t ta) { + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwcvtu_x_x_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwcvtu_x_x_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl, size_t ta) { + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwcvtu_x_x_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl, size_t ta) { return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c @@ -820,7 +820,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m( @@ -829,7 +829,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m( @@ -838,7 +838,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m( @@ -847,7 +847,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m( @@ -856,7 +856,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( @@ -865,7 +865,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( @@ -874,7 +874,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( @@ -883,7 +883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( @@ -892,7 +892,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( @@ -901,7 +901,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( @@ 
-910,7 +910,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m( @@ -919,7 +919,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( @@ -928,7 +928,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( @@ -937,7 +937,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( @@ -946,7 +946,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return 
vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmul(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, 
vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t 
mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + 
return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m( @@ -1585,7 +1585,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m( @@ -1594,7 +1594,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m( @@ -1603,7 +1603,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m( @@ -1612,7 +1612,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m( @@ -1621,5 +1621,816 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vwmulsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwmul_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwmul_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwmul_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vwmul(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwmul_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwmul_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwmul_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmul_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmul_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmul_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmul_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmul_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmul_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmul_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmul_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmul_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmul_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmul_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmul_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmul_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmul_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmul_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmul_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwmul_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl,
size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwmul_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwmul_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwmul_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwmul_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwmul_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwmulu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwmulu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwmulu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwmulu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwmulu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwmulu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwmulu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwmulu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwmulu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwmulu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwmulu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwmulu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwmulu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwmulu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwmulu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwmulu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwmulu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwmulu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwmulu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwmulu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwmulu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwmulu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_mt(vbool64_t mask, 
vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwmulu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwmulu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwmulu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwmulu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwmulu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwmulu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwmulsu_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return 
vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwmulsu_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwmulsu_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwmulsu_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwmulsu_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwmulsu_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmulsu_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmulsu_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vwmulsu_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmulsu_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmulsu_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmulsu_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmulsu_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmulsu_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmulsu_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmulsu_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmulsu_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmulsu_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmulsu_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmulsu_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmulsu_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmulsu_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmulsu_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwmulsu_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwmulsu_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwmulsu_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwmulsu_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwmulsu_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m8_t test_vwmulsu_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c @@ -549,8 +549,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vwsubu_vv(op1, op2, vl); } @@ -568,8 +567,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { return vwsubu_wv(op1, op2, vl); } @@ -587,8 +585,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vwsubu_vv(op1, op2, vl); } @@ -606,8 +603,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { return vwsubu_wv(op1, op2, vl); } @@ -769,8 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vwsubu_vv(op1, op2, vl); } @@ -788,8 +783,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vwsubu_wv(op1, op2, vl); } @@ -807,8 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vwsubu_vv(op1, op2, vl); } @@ -952,8 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vwsubu_vv(op1, op2, vl); } 
@@ -1097,1225 +1089,2158 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwsub_wv(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, 
vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vwsub_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { + return vwsub_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return 
vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + 
return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { + 
return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint16mf4_t test_vwsub_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint8mf8_t op2, size_t vl) { +vint16mf4_t test_vwsub_wv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, 
vint16mf4_t op1, vint8mf8_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int8_t op2, size_t vl) { +vint16mf4_t test_vwsub_wx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwsub_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwsub_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwsub_wv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwsub_wx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwsub_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwsub_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwsub_wv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwsub_wx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwsub_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwsub_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, 
int8_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwsub_wv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwsub_wx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwsub_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwsub_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwsub_wv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwsub_wx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwsub_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwsub_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwsub_wv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwsub_wx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vwsub_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint32mf2_t test_vwsub_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vwsub_wv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int16_t op2, size_t vl) { +vint32mf2_t test_vwsub_wx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwsub_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint32m1_t test_vwsub_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwsub_wv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int16_t op2, size_t vl) { +vint32m1_t test_vwsub_wx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t test_vwsub_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwsub_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t test_vwsub_wv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwsub_wx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwsub_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwsub_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwsub_wv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwsub_wx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwsub_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t 
test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwsub_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwsub_wv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwsub_wx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwsub_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwsub_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwsub_wv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( +// 
CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwsub_wx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwsub_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwsub_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwsub_wv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwsub_wx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - 
vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwsub_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwsub_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwsub_wv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwsub_wx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwsub_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwsub_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_mt( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwsub_wv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl, size_t ta) { return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwsub_wx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl, size_t ta) { return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwsubu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwsubu_wv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - 
vuint16mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwsubu_wx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwsubu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwsubu_wv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwsubu_wx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint16m1_t test_vwsubu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint16m1_t test_vwsubu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint8mf2_t op2, - size_t vl) { +vuint16m1_t test_vwsubu_wv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint8_t op2, size_t vl) { +vuint16m1_t test_vwsubu_wx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint16m2_t test_vwsubu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint16m2_t test_vwsubu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint8m1_t op2, size_t vl) { +vuint16m2_t test_vwsubu_wv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint8_t op2, size_t vl) { +vuint16m2_t test_vwsubu_wx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint16m4_t test_vwsubu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint16m4_t test_vwsubu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint8m2_t op2, size_t vl) { +vuint16m4_t test_vwsubu_wv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint8_t op2, size_t vl) { +vuint16m4_t test_vwsubu_wx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint16m8_t test_vwsubu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint16m8_t test_vwsubu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint8m4_t op2, size_t vl) { +vuint16m8_t test_vwsubu_wv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint8_t op2, size_t vl) { +vuint16m8_t test_vwsubu_wx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwsubu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint32mf2_t test_vwsubu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vwsubu_wv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint16_t op2, - size_t vl) { +vuint32mf2_t test_vwsubu_wx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vwsubu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint32m1_t test_vwsubu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vwsubu_wv_u32m1_mt(vbool32_t 
mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint16_t op2, size_t vl) { +vuint32m1_t test_vwsubu_wx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vwsubu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint32m2_t test_vwsubu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vwsubu_wv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint16_t op2, size_t vl) { +vuint32m2_t test_vwsubu_wx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_mt( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vwsubu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint32m4_t test_vwsubu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vwsubu_wv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint16_t op2, size_t vl) { +vuint32m4_t test_vwsubu_wx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t test_vwsubu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t 
maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint32m8_t test_vwsubu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t test_vwsubu_wv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint16_t op2, size_t vl) { +vuint32m8_t test_vwsubu_wx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint64m1_t test_vwsubu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint64m1_t test_vwsubu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint32mf2_t op2, - size_t vl) { +vuint64m1_t test_vwsubu_wv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint32_t op2, size_t vl) { +vuint64m1_t test_vwsubu_wx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint64m2_t test_vwsubu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint64m2_t test_vwsubu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint32m1_t op2, - size_t vl) { +vuint64m2_t test_vwsubu_wv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint32_t op2, size_t vl) { +vuint64m2_t test_vwsubu_wx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint64m4_t test_vwsubu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint64m4_t test_vwsubu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint32m2_t op2, - size_t vl) { +vuint64m4_t test_vwsubu_wv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl, size_t ta) { return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint32_t op2, size_t vl) { +vuint64m4_t test_vwsubu_wx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl, size_t ta) { return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint64m8_t test_vwsubu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint64m8_t test_vwsubu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, 
size_t vl, size_t ta) {
   return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_mt(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                   vuint64m8_t op1, vuint32m4_t op2,
-                                   size_t vl) {
+vuint64m8_t test_vwsubu_wv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
   return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_mt(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                   vuint64m8_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwsubu_wx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl, size_t ta) {
   return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -802,7 +802,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
@@ -811,7 +811,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
@@ -820,7 +820,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
@@ -829,7 +829,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
@@ -838,7 +838,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
@@ -847,7 +847,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
@@ -856,7 +856,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
@@ -865,7 +865,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
@@ -874,7 +874,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
@@ -883,7 +883,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
@@ -892,7 +892,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
@@ -901,7 +901,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
@@ -910,7 +910,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
@@ -919,7 +919,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
@@ -928,7 +928,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
@@ -937,7 +937,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
@@ -946,7 +946,7 @@
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+  return vxor(mask, maskedoff, op1, op2, vl);
 }
 //
CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( @@ -955,7 +955,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( @@ -964,7 +964,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( @@ -973,7 +973,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( @@ -982,7 +982,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( @@ -991,7 +991,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( @@ -1000,7 +1000,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( @@ -1009,7 +1009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( @@ -1018,7 +1018,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( @@ -1027,7 +1027,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( @@ -1036,7 +1036,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( @@ -1045,7 +1045,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + 
return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( @@ -1054,7 +1054,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( @@ -1063,7 +1063,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( @@ -1072,7 +1072,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( @@ -1081,7 +1081,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( @@ -1090,7 +1090,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( @@ -1099,7 +1099,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( @@ -1108,7 +1108,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( @@ -1117,7 +1117,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( @@ -1126,7 +1126,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( @@ -1135,7 +1135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( @@ -1144,7 +1144,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( @@ -1153,7 +1153,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( @@ -1162,7 +1162,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( @@ -1171,7 +1171,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( @@ -1180,7 +1180,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( @@ -1189,7 +1189,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( @@ -1198,7 +1198,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( @@ -1207,7 +1207,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( @@ -1216,7 +1216,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( @@ -1225,7 +1225,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( @@ -1234,7 +1234,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( @@ -1243,7 +1243,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t 
maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( @@ -1252,7 +1252,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( @@ -1261,7 +1261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( @@ -1270,7 +1270,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( @@ -1279,7 +1279,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( @@ -1288,7 +1288,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( @@ -1297,7 +1297,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( @@ -1306,7 +1306,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( @@ -1315,7 +1315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( @@ -1324,7 +1324,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( @@ -1333,7 +1333,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( @@ -1342,7 +1342,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( @@ -1360,7 +1360,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( @@ -1369,7 +1369,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( @@ -1378,7 +1378,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( @@ -1387,7 +1387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( @@ -1396,7 +1396,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( @@ -1405,7 +1405,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( @@ -1414,7 +1414,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( @@ -1423,7 +1423,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( @@ -1432,7 +1432,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( @@ -1441,7 +1441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( @@ -1450,7 +1450,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( @@ -1459,7 +1459,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( @@ -1468,7 +1468,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( @@ -1486,7 +1486,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( @@ -1495,7 +1495,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( @@ -1504,7 +1504,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( @@ -1513,7 +1513,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( @@ -1522,7 +1522,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( @@ -1531,7 +1531,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( @@ -1540,7 +1540,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( @@ -1549,7 +1549,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( @@ -1558,7 +1558,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( @@ -1567,7 +1567,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( @@ -1576,7 +1576,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); + return vxor(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m( @@ -1585,5 +1585,798 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) { +
return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) { + return 
vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, 
size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) { + return 
vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_mt(vbool64_t 
mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) { + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c @@ -261,9 +261,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m( @@ -271,9 +270,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m( @@ -281,9 +279,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m( @@ -291,9 +288,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint8m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, 
VE_TAIL_AGNOSTIC); +vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m( @@ -301,9 +297,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint8m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m( @@ -311,9 +306,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint8m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( @@ -321,9 +315,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint8mf8_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( @@ -331,9 +324,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint8mf4_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( @@ -341,9 +333,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint8mf2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( @@ -351,9 +342,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint8m1_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t 
maskedoff, vuint8m1_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( @@ -361,9 +351,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint8m2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( @@ -371,9 +360,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint8mf8_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return vzext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( @@ -381,9 +369,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint8mf4_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return vzext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( @@ -391,9 +378,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint8mf2_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return vzext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( @@ -401,9 +387,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint8m1_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return vzext_vf8(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( @@ -411,9 +396,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint16mf4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, 
vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m( @@ -421,9 +405,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint16mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m( @@ -431,9 +414,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint16m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m( @@ -441,9 +423,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint16m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( @@ -451,9 +432,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint16m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( @@ -461,9 +441,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint16mf4_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( @@ -471,9 +450,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint16mf2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( @@ -481,9 +459,8 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint16m1_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( @@ -491,9 +468,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint16m2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return vzext_vf4(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( @@ -501,9 +477,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint32mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( @@ -511,9 +486,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint32m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( @@ -521,9 +495,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint32m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( @@ -531,7 +504,259 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint32m4_t op1, size_t vl) { +vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { + return vzext_vf2(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vzext_vf2_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vzext_vf2_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vzext_vf2_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vzext_vf2_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vzext_vf2_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vzext_vf2_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl, size_t ta) { return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl, size_t ta) { + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl, size_t ta) { + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl, size_t ta) { + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl, size_t ta) { + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf2_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vzext_vf2_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf2_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf2_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf2_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf2_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl, size_t ta) { + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl, size_t ta) { + return 
vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf2_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf2_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf2_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl, size_t ta) { + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c @@ -42,3 +42,30 @@ vint8m1_t test_vadd_ta(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_generic_vadd( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_generic_vadd(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_generic_vadd_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_generic_vadd_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_generic_vadd_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_generic_vadd_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED); +} diff --git 
a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -158,6 +158,7 @@ bool HasMaskedOffOperand; bool HasVL; bool HasPolicy; + bool IsMaskPolicyIntrinsic; bool HasNoMaskedOverloaded; bool HasAutoDef; // There is automiatic definition in header std::string ManualCodegen; @@ -173,7 +174,8 @@ RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, StringRef MangledSuffix, StringRef IRName, bool HasSideEffects, bool IsMask, bool HasMaskedOffOperand, bool HasVL, - bool HasPolicy, bool HasNoMaskedOverloaded, bool HasAutoDef, + bool HasPolicy, bool IsMaskPolicyIntrinsic, + bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector &IntrinsicTypes, StringRef RequiredExtension, unsigned NF); @@ -185,6 +187,7 @@ bool hasMaskedOffOperand() const { return HasMaskedOffOperand; } bool hasVL() const { return HasVL; } bool hasPolicy() const { return HasPolicy; } + bool isMaskPolicyIntrinsic() const { return IsMaskPolicyIntrinsic; } bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; } bool hasManualCodegen() const { return !ManualCodegen.empty(); } bool hasAutoDef() const { return HasAutoDef; } @@ -201,9 +204,6 @@ // init the RVVIntrinsic ID and IntrinsicTypes. void emitCodeGenSwitchBody(raw_ostream &o) const; - // Emit the define macors for mask intrinsics using _mt intrinsics. - void emitIntrinsicMaskMacro(raw_ostream &o) const; - // Emit the macros for mapping C/C++ intrinsic function to builtin functions. void emitIntrinsicMacro(raw_ostream &o) const; @@ -764,14 +764,16 @@ StringRef NewMangledName, StringRef MangledSuffix, StringRef IRName, bool HasSideEffects, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, + bool IsMaskPolicyIntrinsic, bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &OutInTypes, const std::vector &NewIntrinsicTypes, StringRef RequiredExtension, unsigned NF) : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask), HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), - HasPolicy(HasPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded), - HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) { + HasPolicy(HasPolicy), IsMaskPolicyIntrinsic(IsMaskPolicyIntrinsic), + HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef), + ManualCodegen(ManualCodegen.str()), NF(NF) { // Init Name and MangledName Name = NewName.str(); @@ -785,7 +787,7 @@ MangledName += "_" + MangledSuffix.str(); if (IsMask) { Name += "_m"; - if (HasPolicy) + if (IsMaskPolicyIntrinsic) Name += "t"; } // Init RISC-V extensions @@ -839,9 +841,15 @@ if (isMask()) { if (hasVL()) { - if (hasPolicy()) - OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n"; - else + if (hasPolicy()) { + if (isMaskPolicyIntrinsic()) + OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n"; + else { + OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n"; + OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType(), " + "TAIL_AGNOSTIC));\n"; + } + } else OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n"; } else { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n"; @@ -882,24 +890,6 @@ OS << ")\n"; } -void RVVIntrinsic::emitIntrinsicMaskMacro(raw_ostream &OS) const { - OS << "#define " << getName().drop_back() << "("; - if (!InputTypes.empty()) { - ListSeparator LS; - for 
(unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i) - OS << LS << "op" << i; - } - OS << ") \\\n"; - OS << "__builtin_rvv_" << getName() << "("; - ListSeparator LS; - if (!InputTypes.empty()) { - for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i) - OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")"; - } - OS << LS << "(size_t)VE_TAIL_AGNOSTIC"; - OS << ")\n"; -} - void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const { OS << "__attribute__((clang_builtin_alias("; OS << "__builtin_rvv_" << getName() << ")))\n"; @@ -1014,12 +1004,6 @@ Inst.emitIntrinsicMacro(OS); }); - // Use _mt to implement _m intrinsics. - emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { - if (Inst.isMask() && Inst.hasPolicy()) - Inst.emitIntrinsicMaskMacro(OS); - }); - OS << "#define __riscv_v_intrinsic_overloading 1\n"; // Print Overloaded APIs @@ -1065,7 +1049,17 @@ std::stable_sort(Defs.begin(), Defs.end(), [](const std::unique_ptr &A, const std::unique_ptr &B) { - return A->getIRName() < B->getIRName(); + int Cmp = A->getIRName().compare(B->getIRName()); + if (Cmp != 0) + return Cmp < 0; + // Some mask intrinsics use the same IRName as unmasked. + // Sort the unmasked intrinsics first. + if (A->isMask() != B->isMask()) + return A->isMask() < B->isMask(); + // _m and _mt intrinsics use the same IRName. + // Sort the _m intrinsics first. + return A->isMaskPolicyIntrinsic() < + B->isMaskPolicyIntrinsic(); }); // Print switch body when the ir name or ManualCodegen changes from previous // iteration. @@ -1073,7 +1067,8 @@ for (auto &Def : Defs) { StringRef CurIRName = Def->getIRName(); if (CurIRName != PrevDef->getIRName() || - (Def->getManualCodegen() != PrevDef->getManualCodegen())) { + (Def->getManualCodegen() != PrevDef->getManualCodegen()) || + (Def->isMaskPolicyIntrinsic() != PrevDef->isMaskPolicyIntrinsic())) { PrevDef->emitCodeGenSwitchBody(OS); } PrevDef = Def.get(); @@ -1183,8 +1178,9 @@ ProtoMaskSeq.push_back("z"); } + SmallVector ProtoMaskPolicySeq = ProtoMaskSeq; if (HasPolicy) { - ProtoMaskSeq.push_back("Kz"); + ProtoMaskPolicySeq.push_back("Kz"); } // Create Intrinsics for each type and LMUL. 
@@ -1201,8 +1197,9 @@ Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRName, HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, - HasVL, HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, - Types.getValue(), IntrinsicTypes, RequiredExtension, NF)); + HasVL, HasPolicy, /*IsMaskPolicyIntrinsic*/ false, + HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(), + IntrinsicTypes, RequiredExtension, NF)); if (HasMask) { // Create a mask intrinsic Optional MaskTypes = @@ -1210,8 +1207,18 @@ Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL, - HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, - MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF)); + HasPolicy, /*IsMaskPolicyIntrinsic=*/false, HasNoMaskedOverloaded, + HasAutoDef, ManualCodegenMask, MaskTypes.getValue(), + IntrinsicTypes, RequiredExtension, NF)); + if (HasPolicy) { + MaskTypes = computeTypes(I, Log2LMUL, NF, ProtoMaskPolicySeq); + Out.push_back(std::make_unique( + Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, + HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL, + HasPolicy, /*IsMaskPolicyIntrinsic=*/true, + HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, + MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF)); + } } } // end for Log2LMULList } // end for TypeRange
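For readers skimming the test diffs above, here is a minimal usage sketch (not part of the patch) of the three calling conventions the new tests exercise: the unmasked overloaded form, the masked form without a policy operand (which codegen now completes with TAIL_AGNOSTIC, the trailing `i64 1` in the CHECK lines), and the explicit-policy `_mt` form. It assumes the usual `<riscv_vector.h>` include used by these tests and the `VE_TAIL_*` macros emitted by the updated RISCVVEmitter; the function name `demo_policy_forms` is hypothetical.

```c
#include <riscv_vector.h>
#include <stddef.h>

/* Illustrative sketch only, not part of the patch. */
vint8m1_t demo_policy_forms(vbool8_t mask, vint8m1_t maskedoff,
                            vint8m1_t op1, vint8m1_t op2, size_t vl) {
  /* Unmasked overloaded form: unchanged by this patch. */
  vint8m1_t sum = vadd(op1, op2, vl);

  /* Masked form without a policy operand: codegen appends TAIL_AGNOSTIC
     (the trailing `i64 1` in the CHECK lines above). */
  vint8m1_t masked_ta = vadd(mask, maskedoff, sum, op2, vl);

  /* Masked form with an explicit policy operand: the extra argument selects
     tail-undisturbed (`i64 0` in the vadd-policy.c CHECK lines). */
  vint8m1_t masked_tu = vadd(mask, maskedoff, sum, op2, vl, VE_TAIL_UNDISTURBED);

  /* Non-overloaded `_mt` spelling of the same explicit-policy intrinsic. */
  return vadd_vv_i8m1_mt(mask, masked_ta, masked_tu, op2, vl, VE_TAIL_AGNOSTIC);
}
```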